/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include <linux/console.h>

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
#include "display.h"

#include <drm/drm_pciids.h>


int radeon_no_wb = 1;
int radeon_modeset = -1;
int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode = 0;
int radeon_vram_limit = 0;
int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_new_pll = -1;
int radeon_dynpm = -1;
int radeon_audio = 1;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_disp_priority = 0;

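/*
 * In the upstream Linux radeon driver these globals are exposed as module
 * parameters; in this port they are set from the boot command line instead
 * (see parse_cmdline below).  Purely as an illustration of the upstream
 * pattern (not used in this port):
 *
 *     module_param_named(no_wb, radeon_no_wb, int, 0444);
 *     MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 */
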
int irq_override = 0;


extern display_t *rdisplay;

void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct radeon_device *rdev, videomode_t *mode);

int get_modes(videomode_t *mode, int *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);


/* Legacy VGA regions */
#define VGA_RSRC_NONE          0x00
#define VGA_RSRC_LEGACY_IO     0x01
#define VGA_RSRC_LEGACY_MEM    0x02
#define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
/* Non-legacy access */
#define VGA_RSRC_NORMAL_IO     0x04
#define VGA_RSRC_NORMAL_MEM    0x08


static const char radeon_family_name[][16] = {
        "R100",
        "RV100",
        "RS100",
        "RV200",
        "RS200",
        "R200",
        "RV250",
        "RS300",
        "RV280",
        "R300",
        "R350",
        "RV350",
        "RV380",
        "R420",
        "R423",
        "RV410",
        "RS400",
        "RS480",
        "RS600",
        "RS690",
        "RS740",
        "RV515",
        "R520",
        "RV530",
        "RV560",
        "RV570",
        "R580",
        "R600",
        "RV610",
        "RV630",
        "RV670",
        "RV620",
        "RV635",
        "RS780",
        "RS880",
        "RV770",
        "RV730",
        "RV710",
        "RV740",
        "CEDAR",
        "REDWOOD",
        "JUNIPER",
        "CYPRESS",
        "HEMLOCK",
        "PALM",
        "SUMO",
        "SUMO2",
        "BARTS",
        "TURKS",
        "CAICOS",
        "CAYMAN",
        "LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
        /* FIXME: check this out */
        if (rdev->family < CHIP_R600) {
                int i;

                for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
                        radeon_clear_surface_reg(rdev, i);
                }
                /* enable surfaces */
                WREG32(RADEON_SURFACE_CNTL, 0);
        }
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
        int i;

        /* FIXME: check this out */
        if (rdev->family < CHIP_R300) {
                rdev->scratch.num_reg = 5;
        } else {
                rdev->scratch.num_reg = 7;
        }
        rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
        for (i = 0; i < rdev->scratch.num_reg; i++) {
                rdev->scratch.free[i] = true;
                rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
        }
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.free[i]) {
                        rdev->scratch.free[i] = false;
                        *reg = rdev->scratch.reg[i];
                        return 0;
                }
        }
        return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
        int i;

        for (i = 0; i < rdev->scratch.num_reg; i++) {
                if (rdev->scratch.reg[i] == reg) {
                        rdev->scratch.free[i] = true;
                        return;
                }
        }
}

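/*
 * Usage sketch (illustrative only, not part of the driver): callers grab a
 * scratch register, program it, and release it when done, e.g. for a ring
 * test that waits for the CP to write a marker value back:
 *
 *     uint32_t scratch;
 *
 *     if (radeon_scratch_get(rdev, &scratch) == 0) {
 *             WREG32(scratch, 0xCAFEDEAD);    // hypothetical marker value
 *             ...                             // submit work, poll the register
 *             radeon_scratch_free(rdev, scratch);
 *     }
 */
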
void radeon_wb_disable(struct radeon_device *rdev)
{
        int r;

        if (rdev->wb.wb_obj) {
                r = radeon_bo_reserve(rdev->wb.wb_obj, false);
                if (unlikely(r != 0))
                        return;
                radeon_bo_kunmap(rdev->wb.wb_obj);
                radeon_bo_unpin(rdev->wb.wb_obj);
                radeon_bo_unreserve(rdev->wb.wb_obj);
        }
        rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
        radeon_wb_disable(rdev);
        if (rdev->wb.wb_obj) {
                radeon_bo_unref(&rdev->wb.wb_obj);
                rdev->wb.wb = NULL;
                rdev->wb.wb_obj = NULL;
        }
}

int radeon_wb_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->wb.wb_obj == NULL) {
                r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
                                RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
                if (r) {
                        dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
                        return r;
                }
        }
        r = radeon_bo_reserve(rdev->wb.wb_obj, false);
        if (unlikely(r != 0)) {
                radeon_wb_fini(rdev);
                return r;
        }
        r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
                          &rdev->wb.gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->wb.wb_obj);
                dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
                radeon_wb_fini(rdev);
                return r;
        }
        r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
        radeon_bo_unreserve(rdev->wb.wb_obj);
        if (r) {
                dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
                radeon_wb_fini(rdev);
                return r;
        }

        /* clear wb memory */
        memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
        /* disable event_write fences */
        rdev->wb.use_event = false;
        /* disabled via module param */
        if (radeon_no_wb == 1)
                rdev->wb.enabled = false;
        else {
                /* often unreliable on AGP */
//              if (rdev->flags & RADEON_IS_AGP) {
//                      rdev->wb.enabled = false;
//              } else {
                        rdev->wb.enabled = true;
                        /* event_write fences are only available on r600+ */
                        if (rdev->family >= CHIP_R600)
                                rdev->wb.use_event = true;
//              }
        }
        /* always use writeback/events on NI */
        if (ASIC_IS_DCE5(rdev)) {
                rdev->wb.enabled = true;
                rdev->wb.use_event = true;
        }

        dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

        return 0;
}

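/*
 * When writeback is enabled the GPU mirrors selected values (ring pointers,
 * fence sequence numbers) into this page so the CPU can poll ordinary RAM
 * instead of MMIO.  A minimal read sketch, assuming the caller owns a 4-byte
 * slot at byte offset "index" (hypothetical) inside the writeback page:
 *
 *     uint32_t value = le32_to_cpu(rdev->wb.wb[index / 4]);
 */
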
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * This function tries to place VRAM at the base address provided as a
 * parameter (which is so far either the PCI aperture address or, for IGPs,
 * the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and the AGP aperture doesn't allow room for all the
 * VRAM, then we restrict the VRAM to the PCI aperture size and print a
 * warning.
 *
 * This function never fails; in the worst case VRAM is limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a
 * reference.  Otherwise this would be needed for rv280, all r3xx, and all
 * r4xx, but not IGP.
 *
 * Note: we use mc_vram_size because on some boards we need to program the mc
 * to cover the whole aperture even if the VRAM size is smaller than the
 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because we
 * are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hardware of Novell bug 204882 and the
 * Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size, align the new size on a power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
        mc->vram_start = base;
        if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
                mc->real_vram_size = mc->aper_size;
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
        dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                        mc->mc_vram_size >> 20, mc->vram_start,
                        mc->vram_end, mc->real_vram_size >> 20);
}

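/*
 * Worked example (illustrative numbers only): with base = 0xE0000000 and
 * mc_vram_size = 1 GiB, only 0xFFFFFFFF - 0xE0000000 + 1 = 512 MiB of 32-bit
 * address space remains, so mc_vram_size and real_vram_size are clamped to
 * aper_size and vram_end becomes base + aper_size - 1.
 */
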
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place the GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left, then we adjust the GTT
 * size.  Thus this function never fails.
 *
 * FIXME: when reducing GTT size, align the new size on a power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
        u64 size_af, size_bf;

        size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
        size_bf = mc->vram_start & ~mc->gtt_base_align;
        if (size_bf > size_af) {
                if (mc->gtt_size > size_bf) {
                        dev_warn(rdev->dev, "limiting GTT\n");
                        mc->gtt_size = size_bf;
                }
                mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
        } else {
                if (mc->gtt_size > size_af) {
                        dev_warn(rdev->dev, "limiting GTT\n");
                        mc->gtt_size = size_af;
                }
                mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
        }
        mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
        dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
                        mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

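/*
 * Worked example (illustrative numbers only): with vram_start = 0,
 * vram_end = 0x0FFFFFFF (256 MiB of VRAM at the bottom of the space) and
 * gtt_base_align = 0, the space before VRAM is 0 and the space after it is
 * 0xF0000000, so a 512 MiB GTT lands right after VRAM:
 * gtt_start = 0x10000000, gtt_end = 0x2FFFFFFF.
 */
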
/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
        uint32_t reg;

        /* first check CRTCs */
        if (ASIC_IS_DCE41(rdev)) {
                reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
                if (reg & EVERGREEN_CRTC_MASTER_EN)
                        return true;
        } else if (ASIC_IS_DCE4(rdev)) {
                reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
                        RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
                if (reg & EVERGREEN_CRTC_MASTER_EN)
                        return true;
        } else if (ASIC_IS_AVIVO(rdev)) {
                reg = RREG32(AVIVO_D1CRTC_CONTROL) |
                      RREG32(AVIVO_D2CRTC_CONTROL);
                if (reg & AVIVO_CRTC_EN) {
                        return true;
                }
        } else {
                reg = RREG32(RADEON_CRTC_GEN_CNTL) |
                      RREG32(RADEON_CRTC2_GEN_CNTL);
                if (reg & RADEON_CRTC_EN) {
                        return true;
                }
        }

        /* then check MEM_SIZE, in case the crtcs are off */
        if (rdev->family >= CHIP_R600)
                reg = RREG32(R600_CONFIG_MEMSIZE);
        else
                reg = RREG32(RADEON_CONFIG_MEMSIZE);

        if (reg)
                return true;

        return false;
}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
        fixed20_12 a;
        u32 sclk = rdev->pm.current_sclk;
        u32 mclk = rdev->pm.current_mclk;

        /* sclk/mclk in Mhz */
        a.full = dfixed_const(100);
        rdev->pm.sclk.full = dfixed_const(sclk);
        rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
        rdev->pm.mclk.full = dfixed_const(mclk);
        rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

        if (rdev->flags & RADEON_IS_IGP) {
                a.full = dfixed_const(16);
                /* core_bandwidth = sclk(Mhz) * 16 */
                rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
        }
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
        if (radeon_card_posted(rdev))
                return true;

        if (rdev->bios) {
                DRM_INFO("GPU not posted. posting now...\n");
                if (rdev->is_atom_bios)
                        atom_asic_init(rdev->mode_info.atom_context);
                else
                        radeon_combios_asic_init(rdev->ddev);
                return true;
        } else {
                dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
                return false;
        }
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
        if (rdev->dummy_page.page)
                return 0;
        rdev->dummy_page.page = AllocPage();
        if (rdev->dummy_page.page == NULL)
                return -ENOMEM;
        rdev->dummy_page.addr = MapIoMem(rdev->dummy_page.page, 4096, 5);
        if (!rdev->dummy_page.addr) {
//              __free_page(rdev->dummy_page.page);
                rdev->dummy_page.page = NULL;
                return -ENOMEM;
        }
        return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
        if (rdev->dummy_page.page == NULL)
                return;
        KernelFree(rdev->dummy_page.addr);
        rdev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->pll_rreg(rdev, reg);
        return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = rdev->mc_rreg(rdev, reg);
        return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = RREG32(reg*4);
        return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
        struct radeon_device *rdev = info->dev->dev_private;

        WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
        struct radeon_device *rdev = info->dev->dev_private;
        uint32_t r;

        r = RREG32_IO(reg*4);
        return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
        struct card_info *atom_card_info =
            kzalloc(sizeof(struct card_info), GFP_KERNEL);

        if (!atom_card_info)
                return -ENOMEM;

        rdev->mode_info.atom_card_info = atom_card_info;
        atom_card_info->dev = rdev->ddev;
        atom_card_info->reg_read = cail_reg_read;
        atom_card_info->reg_write = cail_reg_write;
        /* needed for iio ops */
        if (rdev->rio_mem) {
                atom_card_info->ioreg_read = cail_ioreg_read;
                atom_card_info->ioreg_write = cail_ioreg_write;
        } else {
                DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
                atom_card_info->ioreg_read = cail_reg_read;
                atom_card_info->ioreg_write = cail_reg_write;
        }
        atom_card_info->mc_read = cail_mc_read;
        atom_card_info->mc_write = cail_mc_write;
        atom_card_info->pll_read = cail_pll_read;
        atom_card_info->pll_write = cail_pll_write;

        rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
        mutex_init(&rdev->mode_info.atom_context->mutex);
        radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
        atom_allocate_fb_scratch(rdev->mode_info.atom_context);
        return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
        if (rdev->mode_info.atom_context) {
                kfree(rdev->mode_info.atom_context->scratch);
                kfree(rdev->mode_info.atom_context);
        }
        kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
        radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
        return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
        struct radeon_device *rdev = cookie;
        radeon_vga_set_state(rdev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
        /* vramlimit must be a power of two */
        switch (radeon_vram_limit) {
        case 0:
        case 4:
        case 8:
        case 16:
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
        case 4096:
                break;
        default:
                dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
                                radeon_vram_limit);
                radeon_vram_limit = 0;
                break;
        }
        radeon_vram_limit = radeon_vram_limit << 20;
        /* gtt size must be power of two and greater or equal to 32M */
        switch (radeon_gart_size) {
        case 4:
        case 8:
        case 16:
                dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
                                radeon_gart_size);
                radeon_gart_size = 512;
                break;
        case 32:
        case 64:
        case 128:
        case 256:
        case 512:
        case 1024:
        case 2048:
        case 4096:
                break;
        default:
                dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                                radeon_gart_size);
                radeon_gart_size = 512;
                break;
        }
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        /* AGP mode can only be -1, 0, 1, 2, 4, 8 */
        switch (radeon_agpmode) {
        case -1:
        case 0:
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
                                "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
                radeon_agpmode = 0;
                break;
        }
}

int radeon_device_init(struct radeon_device *rdev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags)
{
        int r, i;
        int dma_bits;

        rdev->shutdown = false;
        rdev->ddev = ddev;
        rdev->pdev = pdev;
        rdev->flags = flags;
        rdev->family = flags & RADEON_FAMILY_MASK;
        rdev->is_atom_bios = false;
        rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
        rdev->gpu_lockup = false;
        rdev->accel_working = false;

        DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
                radeon_family_name[rdev->family], pdev->vendor, pdev->device);

        /* mutex initialization is all done here so we
         * can call these functions again without locking issues */
        mutex_init(&rdev->cs_mutex);
        mutex_init(&rdev->ib_pool.mutex);
        mutex_init(&rdev->cp.mutex);
        mutex_init(&rdev->dc_hw_i2c_mutex);
        if (rdev->family >= CHIP_R600)
                spin_lock_init(&rdev->ih.lock);
        mutex_init(&rdev->gem.mutex);
        mutex_init(&rdev->pm.mutex);
        mutex_init(&rdev->vram_mutex);
        rwlock_init(&rdev->fence_drv.lock);
        INIT_LIST_HEAD(&rdev->gem.objects);

        /* Set asic functions */
        r = radeon_asic_init(rdev);
        if (r)
                return r;
        radeon_check_arguments(rdev);

        /* all of the newer IGP chips have an internal gart.
         * However some rs4xx report as AGP, so remove that here.
         */
        if ((rdev->family >= CHIP_RS400) &&
            (rdev->flags & RADEON_IS_IGP)) {
                rdev->flags &= ~RADEON_IS_AGP;
        }

        if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
                radeon_agp_disable(rdev);
        }

        /* set DMA mask + need_dma32 flags.
         * PCIE - can handle 40-bits.
         * IGP - can handle 40-bits (in theory)
         * AGP - generally dma32 is safest
         * PCI - only dma32
         */
        rdev->need_dma32 = false;
        if (rdev->flags & RADEON_IS_AGP)
                rdev->need_dma32 = true;
        if (rdev->flags & RADEON_IS_PCI)
                rdev->need_dma32 = true;

        dma_bits = rdev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                rdev->need_dma32 = true;
                printk(KERN_WARNING "radeon: No suitable DMA available.\n");
        }

        /* Registers mapping */
        /* TODO: block userspace mapping of io register */
        rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
        rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);

        rdev->rmmio = (void *)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
                                       PG_SW + PG_NOCACHE);

        if (rdev->rmmio == NULL) {
                return -ENOMEM;
        }
        DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
        DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

        r = radeon_init(rdev);
        if (r)
                return r;

        if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
                /* Acceleration not working on AGP card, try again
                 * with fallback to PCI or PCIE GART
                 */
                radeon_asic_reset(rdev);
                radeon_fini(rdev);
                radeon_agp_disable(rdev);
                r = radeon_init(rdev);
                if (r)
                        return r;
        }
//      if (radeon_testing) {
//              radeon_test_moves(rdev);
//      }
        if (radeon_benchmarking) {
                radeon_benchmark(rdev);
        }
        return 0;
}

/*
 * Driver load/unload
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct radeon_device *rdev;
    int r;

    ENTER();

    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
    if (rdev == NULL) {
        return -ENOMEM;
    }

    dev->dev_private = (void *)rdev;

    /* update BUS flag */
    if (drm_device_is_agp(dev)) {
        flags |= RADEON_IS_AGP;
    } else if (drm_device_is_pcie(dev)) {
        flags |= RADEON_IS_PCIE;
    } else {
        flags |= RADEON_IS_PCI;
    }

    /* radeon_device_init should report only fatal errors
     * like memory allocation failure or iomapping failure,
     * or memory manager initialization failure; it must
     * properly initialize the GPU MC controller and permit
     * VRAM allocation
     */
    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r) {
        DRM_ERROR("Fatal error while trying to initialize radeon.\n");
        return r;
    }
    /* Again, modeset_init should fail only on fatal error,
     * otherwise it should provide enough functionality
     * for shadowfb to run
     */
    if (radeon_modeset)
    {
        r = radeon_modeset_init(rdev);
        if (r) {
            return r;
        }
    }
    return 0;
}

videomode_t usermode;


int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    static struct drm_device *dev;
    int ret;

    ENTER();

    dev = kzalloc(sizeof(*dev), 0);
    if (!dev)
        return -ENOMEM;

 //   ret = pci_enable_device(pdev);
 //   if (ret)
 //       goto err_g1;

 //   pci_set_master(pdev);

 //   if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
 //       printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 //       goto err_g2;
 //   }

    dev->pdev = pdev;
    dev->pci_device = pdev->device;
    dev->pci_vendor = pdev->vendor;

    INIT_LIST_HEAD(&dev->filelist);
    INIT_LIST_HEAD(&dev->ctxlist);
    INIT_LIST_HEAD(&dev->vmalist);
    INIT_LIST_HEAD(&dev->maplist);

    spin_lock_init(&dev->count_lock);
    mutex_init(&dev->struct_mutex);
    mutex_init(&dev->ctxlist_mutex);


    ret = radeon_driver_load_kms(dev, ent->driver_data);
    if (ret)
        goto err_g4;

    if (radeon_modeset)
        init_display_kms(dev->dev_private, &usermode);
    else
        init_display(dev->dev_private, &usermode);


    uint32_t route0 = PciRead32(0, 31<<3, 0x60);

    uint32_t route1 = PciRead32(0, 31<<3, 0x68);

    uint8_t elcr0 = in8(0x4D0);
    uint8_t elcr1 = in8(0x4D1);

    dbgprintf("pci route: %x %x elcr: %x %x\n", route0, route1, elcr0, elcr1);

    LEAVE();

    return 0;

err_g4:
//    drm_put_minor(&dev->primary);
//err_g3:
//    if (drm_core_check_feature(dev, DRIVER_MODESET))
//        drm_put_minor(&dev->control);
//err_g2:
//    pci_disable_device(pdev);
//err_g1:
    free(dev);

    LEAVE();

    return ret;
}

resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
    return pci_resource_start(dev->pdev, resource);
}

resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
    return pci_resource_len(dev->pdev, resource);
}


uint32_t __div64_32(uint64_t *n, uint32_t base)
{
        uint64_t rem = *n;
        uint64_t b = base;
        uint64_t res, d = 1;
        uint32_t high = rem >> 32;

        /* Reduce the thing a bit first */
        res = 0;
        if (high >= base) {
                high /= base;
                res = (uint64_t) high << 32;
                rem -= (uint64_t) (high*base) << 32;
        }

        while ((int64_t)b > 0 && b < rem) {
                b = b+b;
                d = d+d;
        }

        do {
                if (rem >= b) {
                        rem -= b;
                        res += d;
                }
                b >>= 1;
                d >>= 1;
        } while (d);

        *n = res;
        return rem;
}

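/*
 * __div64_32 is the helper behind the kernel's do_div() macro: it divides
 * the 64-bit value in place and returns the remainder.  Illustrative use:
 *
 *     uint64_t bytes = 1234567899ULL;
 *     uint32_t rem = __div64_32(&bytes, 1000);   // bytes = 1234567, rem = 899
 */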

static struct pci_device_id pciidlist[] = {
    radeon_PCI_IDS
};


#define API_VERSION     0x01000100

#define SRV_GETVERSION  0
#define SRV_ENUM_MODES  1
#define SRV_SET_MODE    2

#define SRV_CREATE_VIDEO 9
#define SRV_BLIT_VIDEO   10

int r600_video_blit(uint64_t src_offset, int x, int y,
                    int w, int h, int pitch);

int _stdcall display_handler(ioctl_t *io)
{
    int    retval = -1;
    u32_t *inp;
    u32_t *outp;

    inp = io->input;
    outp = io->output;

    switch (io->io_code)
    {
        case SRV_GETVERSION:
            if (io->out_size == 4)
            {
                *outp  = API_VERSION;
                retval = 0;
            }
            break;

        case SRV_ENUM_MODES:
            dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
                       inp, io->inp_size, io->out_size);

            if (radeon_modeset &&
                (outp != NULL) && (io->out_size == 4) &&
                (io->inp_size == *outp * sizeof(videomode_t)))
            {
                retval = get_modes((videomode_t*)inp, outp);
            }
            break;

        case SRV_SET_MODE:
            dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
                       inp, io->inp_size);

            if (radeon_modeset &&
                (inp != NULL) &&
                (io->inp_size == sizeof(videomode_t)))
            {
                retval = set_user_mode((videomode_t*)inp);
            }
            break;

        case SRV_CREATE_VIDEO:
            retval = r600_create_video(inp[0], inp[1], outp);
            break;

        case SRV_BLIT_VIDEO:
            r600_video_blit(((uint64_t*)inp)[0], inp[2], inp[3],
                    inp[4], inp[5], inp[6]);

            retval = 0;
            break;
    }

    return retval;
}

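/*
 * Sketch of the SRV_ENUM_MODES calling convention implied by the checks
 * above (illustrative, from the client's point of view): io->output points
 * to a u32 holding the number of videomode_t slots provided, io->out_size
 * is 4, and io->input points to a buffer of that many videomode_t entries
 * for get_modes() to fill in.
 */
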
static char  log[256];
static pci_dev_t device;

u32_t drvEntry(int action, char *cmdline)
{
    struct radeon_device *rdev = NULL;

    struct pci_device_id  *ent;

    int     err;
    u32_t   retval = 0;

    if (action != 1)
        return 0;

    if (GetService("DISPLAY") != 0)
        return 0;

    if (cmdline && *cmdline)
        parse_cmdline(cmdline, &usermode, log, &radeon_modeset);

    if (!dbg_open(log))
    {
        strcpy(log, "/RD/1/DRIVERS/atikms.log");

        if (!dbg_open(log))
        {
            printf("Can't open %s\nExit\n", log);
            return 0;
        }
    }
    dbgprintf("Radeon RC11 cmdline %s\n", cmdline);

    enum_pci_devices();

    ent = find_pci_device(&device, pciidlist);

    if (unlikely(ent == NULL))
    {
        dbgprintf("device not found\n");
        return 0;
    }

    dbgprintf("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    err = drm_get_dev(&device.pci_dev, ent);

    rdev = rdisplay->ddev->dev_private;

    err = RegService("DISPLAY", display_handler);

    if (err != 0)
        dbgprintf("Set DISPLAY handler\n");

    return err;
}

void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
{};

void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{};