
  1. /*
  2.  * Copyright 2008 Advanced Micro Devices, Inc.
  3.  * Copyright 2008 Red Hat Inc.
  4.  * Copyright 2009 Jerome Glisse.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the "Software"),
  8.  * to deal in the Software without restriction, including without limitation
  9.  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  10.  * and/or sell copies of the Software, and to permit persons to whom the
  11.  * Software is furnished to do so, subject to the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice shall be included in
  14.  * all copies or substantial portions of the Software.
  15.  *
  16.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18.  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  19.  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  20.  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  21.  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  22.  * OTHER DEALINGS IN THE SOFTWARE.
  23.  *
  24.  * Authors: Dave Airlie
  25.  *          Alex Deucher
  26.  *          Jerome Glisse
  27.  */
  28. //#include <linux/console.h>
  29.  
  30. #include <drm/drmP.h>
  31. #include <drm/drm_crtc_helper.h>
  32. #include <drm/radeon_drm.h>
  33. #include "radeon_reg.h"
  34. #include "radeon.h"
  35. #include "atom.h"
  36. #include "display.h"
  37.  
  38. #include <drm/drm_pciids.h>
  39.  
  40.  
  41. int radeon_no_wb;
  42. int radeon_modeset = -1;
  43. int radeon_dynclks = -1;
  44. int radeon_r4xx_atom = 0;
  45. int radeon_agpmode = 0;
  46. int radeon_vram_limit = 0;
  47. int radeon_gart_size = 512; /* default gart size */
  48. int radeon_benchmarking = 0;
  49. int radeon_testing = 0;
  50. int radeon_connector_table = 0;
  51. int radeon_tv = 1;
  52. int radeon_new_pll = -1;
  53. int radeon_dynpm = -1;
  54. int radeon_audio = 1;
  55. int radeon_hw_i2c = 0;
  56. int radeon_pcie_gen2 = 0;
  57. int radeon_disp_priority = 0;
  58.  
  59.  
  60.  
  61. extern display_t *rdisplay;
  62.  
  63. void parse_cmdline(char *cmdline, videomode_t *mode, char *log, int *kms);
  64. int init_display(struct radeon_device *rdev, videomode_t *mode);
  65. int init_display_kms(struct radeon_device *rdev, videomode_t *mode);
  66.  
  67. int get_modes(videomode_t *mode, int *count);
  68. int set_user_mode(videomode_t *mode);
  69. int r100_2D_test(struct radeon_device *rdev);
  70.  
  71.  
  72.  /* Legacy VGA regions */
  73. #define VGA_RSRC_NONE          0x00
  74. #define VGA_RSRC_LEGACY_IO     0x01
  75. #define VGA_RSRC_LEGACY_MEM    0x02
  76. #define VGA_RSRC_LEGACY_MASK   (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)
  77. /* Non-legacy access */
  78. #define VGA_RSRC_NORMAL_IO     0x04
  79. #define VGA_RSRC_NORMAL_MEM    0x08
  80.  
  81.  
  82. static const char radeon_family_name[][16] = {
  83.         "R100",
  84.         "RV100",
  85.         "RS100",
  86.         "RV200",
  87.         "RS200",
  88.         "R200",
  89.         "RV250",
  90.         "RS300",
  91.         "RV280",
  92.         "R300",
  93.         "R350",
  94.         "RV350",
  95.         "RV380",
  96.         "R420",
  97.         "R423",
  98.         "RV410",
  99.         "RS400",
  100.         "RS480",
  101.         "RS600",
  102.         "RS690",
  103.         "RS740",
  104.         "RV515",
  105.         "R520",
  106.         "RV530",
  107.         "RV560",
  108.         "RV570",
  109.         "R580",
  110.         "R600",
  111.         "RV610",
  112.         "RV630",
  113.         "RV670",
  114.         "RV620",
  115.         "RV635",
  116.         "RS780",
  117.         "RS880",
  118.         "RV770",
  119.         "RV730",
  120.         "RV710",
  121.         "RV740",
  122.         "CEDAR",
  123.         "REDWOOD",
  124.         "JUNIPER",
  125.         "CYPRESS",
  126.         "HEMLOCK",
  127.         "PALM",
  128.         "SUMO",
  129.         "SUMO2",
  130.         "BARTS",
  131.         "TURKS",
  132.         "CAICOS",
  133.         "CAYMAN",
  134.         "LAST",
  135. };
  136.  
  137. /*
  138.  * Clear GPU surface registers.
  139.  */
  140. void radeon_surface_init(struct radeon_device *rdev)
  141. {
  142.     /* FIXME: check this out */
  143.     if (rdev->family < CHIP_R600) {
  144.         int i;
  145.  
  146.                 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
  147.            radeon_clear_surface_reg(rdev, i);
  148.         }
  149.                 /* enable surfaces */
  150.                 WREG32(RADEON_SURFACE_CNTL, 0);
  151.     }
  152. }
  153.  
  154. /*
  155.  * GPU scratch register helper functions.
  156.  */
  157. void radeon_scratch_init(struct radeon_device *rdev)
  158. {
  159.     int i;
  160.  
  161.     /* FIXME: check this out */
  162.     if (rdev->family < CHIP_R300) {
  163.         rdev->scratch.num_reg = 5;
  164.     } else {
  165.         rdev->scratch.num_reg = 7;
  166.     }
  167.         rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
  168.     for (i = 0; i < rdev->scratch.num_reg; i++) {
  169.         rdev->scratch.free[i] = true;
  170.                 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
  171.     }
  172. }
  173.  
  174. int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
  175. {
  176.         int i;
  177.  
  178.         for (i = 0; i < rdev->scratch.num_reg; i++) {
  179.                 if (rdev->scratch.free[i]) {
  180.                         rdev->scratch.free[i] = false;
  181.                         *reg = rdev->scratch.reg[i];
  182.                         return 0;
  183.                 }
  184.         }
  185.         return -EINVAL;
  186. }
  187.  
  188. void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
  189. {
  190.         int i;
  191.  
  192.         for (i = 0; i < rdev->scratch.num_reg; i++) {
  193.                 if (rdev->scratch.reg[i] == reg) {
  194.                         rdev->scratch.free[i] = true;
  195.                         return;
  196.                 }
  197.         }
  198. }
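/*
 * Illustrative usage sketch (not part of the original file): a caller such
 * as a ring test typically reserves a scratch register, lets the CPU or the
 * CP write a value into it, and releases it afterwards. The function name
 * and the value below (scratch_usage_sketch, 0xCAFEDEAD) are hypothetical.
 */
#if 0
static int scratch_usage_sketch(struct radeon_device *rdev)
{
	uint32_t reg;
	int r;

	r = radeon_scratch_get(rdev, &reg);   /* -EINVAL when none are free */
	if (r)
		return r;
	WREG32(reg, 0xCAFEDEAD);              /* write through the register */
	/* ... submit work, poll RREG32(reg) for the expected value ... */
	radeon_scratch_free(rdev, reg);       /* mark the register free again */
	return 0;
}
#endif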
  199.  
  200. void radeon_wb_disable(struct radeon_device *rdev)
  201. {
  202.         int r;
  203.  
  204.         if (rdev->wb.wb_obj) {
  205.                 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
  206.                 if (unlikely(r != 0))
  207.                         return;
  208.                 radeon_bo_kunmap(rdev->wb.wb_obj);
  209.                 radeon_bo_unpin(rdev->wb.wb_obj);
  210.                 radeon_bo_unreserve(rdev->wb.wb_obj);
  211.         }
  212.         rdev->wb.enabled = false;
  213. }
  214.  
  215. void radeon_wb_fini(struct radeon_device *rdev)
  216. {
  217.         radeon_wb_disable(rdev);
  218.         if (rdev->wb.wb_obj) {
  219.                 radeon_bo_unref(&rdev->wb.wb_obj);
  220.                 rdev->wb.wb = NULL;
  221.                 rdev->wb.wb_obj = NULL;
  222.         }
  223. }
  224.  
  225. int radeon_wb_init(struct radeon_device *rdev)
  226. {
  227.         int r;
  228.  
  229.         if (rdev->wb.wb_obj == NULL) {
  230.                 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
  231.                                 RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
  232.                 if (r) {
  233.                         dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
  234.                         return r;
  235.                 }
  236.         }
  237.         r = radeon_bo_reserve(rdev->wb.wb_obj, false);
  238.         if (unlikely(r != 0)) {
  239.                 radeon_wb_fini(rdev);
  240.                 return r;
  241.         }
  242.         r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
  243.                           &rdev->wb.gpu_addr);
  244.         if (r) {
  245.                 radeon_bo_unreserve(rdev->wb.wb_obj);
  246.                 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
  247.                 radeon_wb_fini(rdev);
  248.                 return r;
  249.         }
  250.         r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
  251.         radeon_bo_unreserve(rdev->wb.wb_obj);
  252.         if (r) {
  253.                 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
  254.                 radeon_wb_fini(rdev);
  255.                 return r;
  256.         }
  257.  
  258.         /* clear wb memory */
  259.         memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
  260.         /* disable event_write fences */
  261.         rdev->wb.use_event = false;
  262.         /* disabled via module param */
  263.         if (radeon_no_wb == 1)
  264.                 rdev->wb.enabled = false;
  265.         else {
  266.                 /* often unreliable on AGP */
  267. //              if (rdev->flags & RADEON_IS_AGP) {
  268. //                      rdev->wb.enabled = false;
  269. //              } else {
  270.                         rdev->wb.enabled = true;
  271.                         /* event_write fences are only available on r600+ */
  272.                         if (rdev->family >= CHIP_R600)
  273.                                 rdev->wb.use_event = true;
  274. //              }
  275.         }
  276.         /* always use writeback/events on NI */
  277.         if (ASIC_IS_DCE5(rdev)) {
  278.                 rdev->wb.enabled = true;
  279.                 rdev->wb.use_event = true;
  280.         }
  281.  
  282.         dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
  283.  
  284.         return 0;
  285. }
  286.  
  287. /**
  288.  * radeon_vram_location - try to find VRAM location
  289.  * @rdev: radeon device structure holding all necessary information
  290.  * @mc: memory controller structure holding memory information
  291.  * @base: base address at which to put VRAM
  292.  *
  293.  * Function will try to place VRAM at the base address provided as a
  294.  * parameter (which is so far either the PCI aperture address or,
  295.  * for IGP, the TOM base address).
  296.  *
  297.  * If there is not enough space to fit the invisible VRAM in the 32-bit
  298.  * address space then we limit the VRAM size to the aperture.
  299.  *
  300.  * If we are using AGP and the AGP aperture doesn't allow us to have
  301.  * room for all the VRAM then we restrict the VRAM to the PCI aperture
  302.  * size and print a warning.
  303.  *
  304.  * This function never fails; the worst case is limiting VRAM.
  305.  *
  306.  * Note: GTT start, end and size should be initialized before calling this
  307.  * function on AGP platforms.
  308.  *
  309.  * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
  310.  * this shouldn't be a problem as we are using the PCI aperture as a reference.
  311.  * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
  312.  * not IGP.
  313.  *
  314.  * Note: we use mc_vram_size because on some boards we need to program the mc
  315.  * to cover the whole aperture even if the VRAM size is smaller than the
  316.  * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
  317.  *
  318.  * Note: when limiting VRAM it's safe to overwrite real_vram_size because
  319.  * we are not in the case where real_vram_size is smaller than mc_vram_size
  320.  * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
  321.  * ones).
  322.  *
  323.  * Note: the IGP TOM addr should be the same as the aperture addr; we don't
  324.  * explicitly check for that, though.
  325.  *
  326.  * FIXME: when reducing VRAM size, align the new size on a power of 2.
  327.  */
  328. void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
  329. {
  330.         mc->vram_start = base;
  331.         if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
  332.                 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
  333.                 mc->real_vram_size = mc->aper_size;
  334.                 mc->mc_vram_size = mc->aper_size;
  335.         }
  336.         mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
  337.         if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
  338.                 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
  339.                 mc->real_vram_size = mc->aper_size;
  340.                 mc->mc_vram_size = mc->aper_size;
  341.         }
  342.         mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
  343.         dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
  344.                         mc->mc_vram_size >> 20, mc->vram_start,
  345.                         mc->vram_end, mc->real_vram_size >> 20);
  346. }
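/*
 * Worked example (illustrative numbers, not from the original file): with
 * base = 0xD0000000, aper_size = 256MB and mc_vram_size = 1GB, the 32-bit
 * space above base is 0xFFFFFFFF - base + 1 = 768MB, so the 1GB of VRAM
 * does not fit; real_vram_size and mc_vram_size are clamped to the 256MB
 * aperture and vram_end becomes 0xDFFFFFFF.
 */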
  347.  
  348. /**
  349.  * radeon_gtt_location - try to find GTT location
  350.  * @rdev: radeon device structure holding all necessary information
  351.  * @mc: memory controller structure holding memory information
  352.  *
  353.  * Function will try to place GTT before or after VRAM.
  354.  *
  355.  * If the GTT size is bigger than the space left then we adjust the GTT size.
  356.  * Thus this function never fails.
  357.  *
  358.  * FIXME: when reducing GTT size, align the new size on a power of 2.
  359.  */
  360. void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
  361. {
  362.         u64 size_af, size_bf;
  363.  
  364.         size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
  365.         size_bf = mc->vram_start & ~mc->gtt_base_align;
  366.         if (size_bf > size_af) {
  367.                 if (mc->gtt_size > size_bf) {
  368.                         dev_warn(rdev->dev, "limiting GTT\n");
  369.                         mc->gtt_size = size_bf;
  370.                 }
  371.                 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
  372.         } else {
  373.                 if (mc->gtt_size > size_af) {
  374.                         dev_warn(rdev->dev, "limiting GTT\n");
  375.                         mc->gtt_size = size_af;
  376.                 }
  377.                 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
  378.         }
  379.         mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
  380.         dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
  381.                         mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
  382. }
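/*
 * Worked example (illustrative numbers): with VRAM at 0x00000000 - 0x0FFFFFFF
 * (256MB) and gtt_base_align = 0, size_bf = 0 and size_af = 0xF0000000, so
 * the GTT goes after VRAM; with a 512MB GTT, gtt_start = 0x10000000 and
 * gtt_end = 0x2FFFFFFF.
 */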
  383.  
  384. /*
  385.  * GPU helper functions.
  386.  */
  387. bool radeon_card_posted(struct radeon_device *rdev)
  388. {
  389.         uint32_t reg;
  390.  
  391.         /* first check CRTCs */
  392.         if (ASIC_IS_DCE41(rdev)) {
  393.                 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
  394.                         RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
  395.                 if (reg & EVERGREEN_CRTC_MASTER_EN)
  396.                         return true;
  397.         } else if (ASIC_IS_DCE4(rdev)) {
  398.                 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
  399.                         RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
  400.                         RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
  401.                         RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
  402.                         RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
  403.                         RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
  404.                 if (reg & EVERGREEN_CRTC_MASTER_EN)
  405.                         return true;
  406.         } else if (ASIC_IS_AVIVO(rdev)) {
  407.                 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
  408.                       RREG32(AVIVO_D2CRTC_CONTROL);
  409.                 if (reg & AVIVO_CRTC_EN) {
  410.                         return true;
  411.                 }
  412.         } else {
  413.                 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
  414.                       RREG32(RADEON_CRTC2_GEN_CNTL);
  415.                 if (reg & RADEON_CRTC_EN) {
  416.                         return true;
  417.                 }
  418.         }
  419.  
  420.         /* then check MEM_SIZE, in case the crtcs are off */
  421.         if (rdev->family >= CHIP_R600)
  422.                 reg = RREG32(R600_CONFIG_MEMSIZE);
  423.         else
  424.                 reg = RREG32(RADEON_CONFIG_MEMSIZE);
  425.  
  426.         if (reg)
  427.                 return true;
  428.  
  429.         return false;
  430.  
  431. }
  432.  
  433. void radeon_update_bandwidth_info(struct radeon_device *rdev)
  434. {
  435.         fixed20_12 a;
  436.         u32 sclk = rdev->pm.current_sclk;
  437.         u32 mclk = rdev->pm.current_mclk;
  438.  
  439.         /* sclk/mclk in Mhz */
  440.                 a.full = dfixed_const(100);
  441.                 rdev->pm.sclk.full = dfixed_const(sclk);
  442.                 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
  443.                 rdev->pm.mclk.full = dfixed_const(mclk);
  444.                 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
  445.  
  446.         if (rdev->flags & RADEON_IS_IGP) {
  447.                 a.full = dfixed_const(16);
  448.                 /* core_bandwidth = sclk(Mhz) * 16 */
  449.                 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
  450.         }
  451. }
  452.  
  453. bool radeon_boot_test_post_card(struct radeon_device *rdev)
  454. {
  455.         if (radeon_card_posted(rdev))
  456.                 return true;
  457.  
  458.         if (rdev->bios) {
  459.                 DRM_INFO("GPU not posted. posting now...\n");
  460.                 if (rdev->is_atom_bios)
  461.                         atom_asic_init(rdev->mode_info.atom_context);
  462.                 else
  463.                         radeon_combios_asic_init(rdev->ddev);
  464.                 return true;
  465.         } else {
  466.                 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
  467.                 return false;
  468.         }
  469. }
  470.  
  471. int radeon_dummy_page_init(struct radeon_device *rdev)
  472. {
  473.         if (rdev->dummy_page.page)
  474.                 return 0;
  475.     rdev->dummy_page.page = AllocPage();
  476.         if (rdev->dummy_page.page == NULL)
  477.                 return -ENOMEM;
  478.     rdev->dummy_page.addr = MapIoMem(rdev->dummy_page.page, 4096, 5);
  479.         if (!rdev->dummy_page.addr) {
  480. //       __free_page(rdev->dummy_page.page);
  481.                 rdev->dummy_page.page = NULL;
  482.                 return -ENOMEM;
  483.         }
  484.         return 0;
  485. }
  486.  
  487. void radeon_dummy_page_fini(struct radeon_device *rdev)
  488. {
  489.         if (rdev->dummy_page.page == NULL)
  490.                 return;
  491.     KernelFree(rdev->dummy_page.addr);
  492.         rdev->dummy_page.page = NULL;
  493. }
  494.  
  495.  
  496. /* ATOM accessor methods */
  497. static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
  498. {
  499.     struct radeon_device *rdev = info->dev->dev_private;
  500.     uint32_t r;
  501.  
  502.     r = rdev->pll_rreg(rdev, reg);
  503.     return r;
  504. }
  505.  
  506. static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
  507. {
  508.     struct radeon_device *rdev = info->dev->dev_private;
  509.  
  510.     rdev->pll_wreg(rdev, reg, val);
  511. }
  512.  
  513. static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
  514. {
  515.     struct radeon_device *rdev = info->dev->dev_private;
  516.     uint32_t r;
  517.  
  518.     r = rdev->mc_rreg(rdev, reg);
  519.     return r;
  520. }
  521.  
  522. static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
  523. {
  524.     struct radeon_device *rdev = info->dev->dev_private;
  525.  
  526.     rdev->mc_wreg(rdev, reg, val);
  527. }
  528.  
  529. static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
  530. {
  531.     struct radeon_device *rdev = info->dev->dev_private;
  532.  
  533.     WREG32(reg*4, val);
  534. }
  535.  
  536. static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
  537. {
  538.     struct radeon_device *rdev = info->dev->dev_private;
  539.     uint32_t r;
  540.  
  541.     r = RREG32(reg*4);
  542.     return r;
  543. }
  544.  
  545. static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
  546. {
  547.         struct radeon_device *rdev = info->dev->dev_private;
  548.  
  549.         WREG32_IO(reg*4, val);
  550. }
  551.  
  552. static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
  553. {
  554.         struct radeon_device *rdev = info->dev->dev_private;
  555.         uint32_t r;
  556.  
  557.         r = RREG32_IO(reg*4);
  558.         return r;
  559. }
  560.  
  561. int radeon_atombios_init(struct radeon_device *rdev)
  562. {
  563.         struct card_info *atom_card_info =
  564.             kzalloc(sizeof(struct card_info), GFP_KERNEL);
  565.  
  566.         if (!atom_card_info)
  567.                 return -ENOMEM;
  568.  
  569.         rdev->mode_info.atom_card_info = atom_card_info;
  570.         atom_card_info->dev = rdev->ddev;
  571.         atom_card_info->reg_read = cail_reg_read;
  572.         atom_card_info->reg_write = cail_reg_write;
  573.         /* needed for iio ops */
  574.         if (rdev->rio_mem) {
  575.                 atom_card_info->ioreg_read = cail_ioreg_read;
  576.                 atom_card_info->ioreg_write = cail_ioreg_write;
  577.         } else {
  578.                 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
  579.                 atom_card_info->ioreg_read = cail_reg_read;
  580.                 atom_card_info->ioreg_write = cail_reg_write;
  581.         }
  582.         atom_card_info->mc_read = cail_mc_read;
  583.         atom_card_info->mc_write = cail_mc_write;
  584.         atom_card_info->pll_read = cail_pll_read;
  585.         atom_card_info->pll_write = cail_pll_write;
  586.  
  587.         rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
  588.         mutex_init(&rdev->mode_info.atom_context->mutex);
  589.     radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
  590.         atom_allocate_fb_scratch(rdev->mode_info.atom_context);
  591.     return 0;
  592. }
  593.  
  594. void radeon_atombios_fini(struct radeon_device *rdev)
  595. {
  596.         if (rdev->mode_info.atom_context) {
  597.                 kfree(rdev->mode_info.atom_context->scratch);
  598.         kfree(rdev->mode_info.atom_context);
  599.         }
  600.         kfree(rdev->mode_info.atom_card_info);
  601. }
  602.  
  603. int radeon_combios_init(struct radeon_device *rdev)
  604. {
  605.         radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
  606.         return 0;
  607. }
  608.  
  609. void radeon_combios_fini(struct radeon_device *rdev)
  610. {
  611. }
  612.  
  613. /* if we get transitioned to only one device, take VGA back */
  614. static unsigned int radeon_vga_set_decode(void *cookie, bool state)
  615. {
  616.         struct radeon_device *rdev = cookie;
  617.         radeon_vga_set_state(rdev, state);
  618.         if (state)
  619.                 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
  620.                        VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
  621.         else
  622.                 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
  623. }
  624.  
  625. void radeon_check_arguments(struct radeon_device *rdev)
  626. {
  627.         /* vramlimit must be a power of two */
  628.         switch (radeon_vram_limit) {
  629.         case 0:
  630.         case 4:
  631.         case 8:
  632.         case 16:
  633.         case 32:
  634.         case 64:
  635.         case 128:
  636.         case 256:
  637.         case 512:
  638.         case 1024:
  639.         case 2048:
  640.         case 4096:
  641.                 break;
  642.         default:
  643.                 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
  644.                                 radeon_vram_limit);
  645.                 radeon_vram_limit = 0;
  646.                 break;
  647.         }
  648.         radeon_vram_limit = radeon_vram_limit << 20;
  649.         /* gtt size must be a power of two and greater than or equal to 32M */
  650.         switch (radeon_gart_size) {
  651.         case 4:
  652.         case 8:
  653.         case 16:
  654.                 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
  655.                                 radeon_gart_size);
  656.                 radeon_gart_size = 512;
  657.                 break;
  658.         case 32:
  659.         case 64:
  660.         case 128:
  661.         case 256:
  662.         case 512:
  663.         case 1024:
  664.         case 2048:
  665.         case 4096:
  666.                 break;
  667.         default:
  668.                 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
  669.                                 radeon_gart_size);
  670.                 radeon_gart_size = 512;
  671.                 break;
  672.         }
  673.         rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
  674.         /* AGP mode can only be -1, 0, 1, 2, 4, 8 */
  675.         switch (radeon_agpmode) {
  676.         case -1:
  677.         case 0:
  678.         case 1:
  679.         case 2:
  680.         case 4:
  681.         case 8:
  682.                 break;
  683.         default:
  684.                 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
  685.                                 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
  686.                 radeon_agpmode = 0;
  687.                 break;
  688.         }
  689. }
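/*
 * Equivalent form, shown only as an illustration: the vram-limit switch above
 * is an explicit power-of-two check over the range 4..4096, so it could also
 * be written with is_power_of_2() (assuming <linux/log2.h> is available in
 * this port):
 */
#if 0
	if (radeon_vram_limit != 0 &&
	    (!is_power_of_2(radeon_vram_limit) ||
	     radeon_vram_limit < 4 || radeon_vram_limit > 4096)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}
#endif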
  690.  
  691. int radeon_device_init(struct radeon_device *rdev,
  692.                struct drm_device *ddev,
  693.                struct pci_dev *pdev,
  694.                uint32_t flags)
  695. {
  696.         int r, i;
  697.         int dma_bits;
  698.  
  699.     rdev->shutdown = false;
  700.     rdev->ddev = ddev;
  701.     rdev->pdev = pdev;
  702.     rdev->flags = flags;
  703.     rdev->family = flags & RADEON_FAMILY_MASK;
  704.     rdev->is_atom_bios = false;
  705.     rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
  706.     rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
  707.     rdev->gpu_lockup = false;
  708.         rdev->accel_working = false;
  709.  
  710.         DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
  711.                 radeon_family_name[rdev->family], pdev->vendor, pdev->device);
  712.  
  713.     /* mutex initializations are all done here so we
  714.      * can call functions again without locking issues */
  715.     mutex_init(&rdev->cs_mutex);
  716.     mutex_init(&rdev->ib_pool.mutex);
  717.     mutex_init(&rdev->cp.mutex);
  718.         mutex_init(&rdev->dc_hw_i2c_mutex);
  719.         if (rdev->family >= CHIP_R600)
  720.                 spin_lock_init(&rdev->ih.lock);
  721.         mutex_init(&rdev->gem.mutex);
  722.         mutex_init(&rdev->pm.mutex);
  723.         mutex_init(&rdev->vram_mutex);
  724.         rwlock_init(&rdev->fence_drv.lock);
  725.         INIT_LIST_HEAD(&rdev->gem.objects);
  726.  
  727.         /* Set asic functions */
  728.         r = radeon_asic_init(rdev);
  729.         if (r)
  730.                 return r;
  731.         radeon_check_arguments(rdev);
  732.  
  733.         /* All of the newer IGP chips have an internal GART.
  734.          * However, some rs4xx chips report as AGP, so remove that flag here.
  735.          */
  736.         if ((rdev->family >= CHIP_RS400) &&
  737.             (rdev->flags & RADEON_IS_IGP)) {
  738.                 rdev->flags &= ~RADEON_IS_AGP;
  739.         }
  740.  
  741.         if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
  742.                 radeon_agp_disable(rdev);
  743.     }
  744.  
  745.         /* set DMA mask + need_dma32 flags.
  746.          * PCIE - can handle 40-bits.
  747.          * IGP - can handle 40-bits (in theory)
  748.          * AGP - generally dma32 is safest
  749.          * PCI - only dma32
  750.          */
  751.         rdev->need_dma32 = false;
  752.         if (rdev->flags & RADEON_IS_AGP)
  753.                 rdev->need_dma32 = true;
  754.         if (rdev->flags & RADEON_IS_PCI)
  755.                 rdev->need_dma32 = true;
  756.  
  757.         dma_bits = rdev->need_dma32 ? 32 : 40;
  758.         r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
  759.     if (r) {
  760.                 rdev->need_dma32 = true;
  761.         printk(KERN_WARNING "radeon: No suitable DMA available.\n");
  762.     }
  763.  
  764.     /* Registers mapping */
  765.     /* TODO: block userspace mapping of io register */
  766.     rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
  767.     rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
  768.  
  769.     rdev->rmmio =  (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
  770.                                    PG_SW+PG_NOCACHE);
  771.  
  772.     if (rdev->rmmio == NULL) {
  773.         return -ENOMEM;
  774.     }
  775.     DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
  776.     DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
  777.  
  778.         r = radeon_init(rdev);
  779.         if (r)
  780.         return r;
  781.  
  782.         if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
  783.                 /* Acceleration is not working on the AGP card; try again
  784.                  * with a fallback to PCI or PCIE GART
  785.                  */
  786.                 radeon_asic_reset(rdev);
  787.                 radeon_fini(rdev);
  788.                 radeon_agp_disable(rdev);
  789.                 r = radeon_init(rdev);
  790.                 if (r)
  791.                 return r;
  792.         }
  793. //      if (radeon_testing) {
  794. //              radeon_test_moves(rdev);
  795. //    }
  796.    if (radeon_benchmarking) {
  797.        radeon_benchmark(rdev);
  798.     }
  799.         return 0;
  800. }
  801.  
  802.  
  803. /*
  804.  * Driver load/unload
  805.  */
  806. int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
  807. {
  808.     struct radeon_device *rdev;
  809.     int r;
  810.  
  811.     ENTER();
  812.  
  813.     rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
  814.     if (rdev == NULL) {
  815.         return -ENOMEM;
  816.     };
  817.  
  818.     dev->dev_private = (void *)rdev;
  819.  
  820.     /* update BUS flag */
  821.     if (drm_device_is_agp(dev)) {
  822.         flags |= RADEON_IS_AGP;
  823.     } else if (drm_device_is_pcie(dev)) {
  824.         flags |= RADEON_IS_PCIE;
  825.     } else {
  826.         flags |= RADEON_IS_PCI;
  827.     }
  828.  
  829.     /* radeon_device_init should report only fatal errors,
  830.      * like a memory allocation failure, an iomapping failure,
  831.      * or a memory manager initialization failure; it must
  832.      * properly initialize the GPU MC controller and permit
  833.      * VRAM allocation
  834.      */
  835.     r = radeon_device_init(rdev, dev, dev->pdev, flags);
  836.     if (r) {
  837.         DRM_ERROR("Fatal error while trying to initialize radeon.\n");
  838.         return r;
  839.     }
  840.     /* Again, modeset_init should fail only on a fatal error;
  841.      * otherwise it should provide enough functionality
  842.      * for shadowfb to run
  843.      */
  844.     if( radeon_modeset )
  845.     {
  846.         r = radeon_modeset_init(rdev);
  847.         if (r) {
  848.             return r;
  849.         }
  850.     };
  851.     return 0;
  852. }
  853.  
  854. videomode_t usermode;
  855.  
  856.  
  857. int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
  858. {
  859.     static struct drm_device *dev;
  860.     int ret;
  861.  
  862.     ENTER();
  863.  
  864.     dev = kzalloc(sizeof(*dev), 0);
  865.     if (!dev)
  866.         return -ENOMEM;
  867.  
  868.  //   ret = pci_enable_device(pdev);
  869.  //   if (ret)
  870.  //       goto err_g1;
  871.  
  872.  //   pci_set_master(pdev);
  873.  
  874.  //   if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
  875.  //       printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
  876.  //       goto err_g2;
  877.  //   }
  878.  
  879.     dev->pdev = pdev;
  880.     dev->pci_device = pdev->device;
  881.     dev->pci_vendor = pdev->vendor;
  882.  
  883.     INIT_LIST_HEAD(&dev->filelist);
  884.     INIT_LIST_HEAD(&dev->ctxlist);
  885.     INIT_LIST_HEAD(&dev->vmalist);
  886.     INIT_LIST_HEAD(&dev->maplist);
  887.  
  888.     spin_lock_init(&dev->count_lock);
  889.     mutex_init(&dev->struct_mutex);
  890.     mutex_init(&dev->ctxlist_mutex);
  891.  
  892.  
  893.     ret = radeon_driver_load_kms(dev, ent->driver_data );
  894.     if (ret)
  895.         goto err_g4;
  896.  
  897.     if( radeon_modeset )
  898.         init_display_kms(dev->dev_private, &usermode);
  899.     else
  900.         init_display(dev->dev_private, &usermode);
  901.  
  902.     LEAVE();
  903.  
  904.     return 0;
  905.  
  906. err_g4:
  907. //    drm_put_minor(&dev->primary);
  908. //err_g3:
  909. //    if (drm_core_check_feature(dev, DRIVER_MODESET))
  910. //        drm_put_minor(&dev->control);
  911. //err_g2:
  912. //    pci_disable_device(pdev);
  913. //err_g1:
  914.     free(dev);
  915.  
  916.     LEAVE();
  917.  
  918.     return ret;
  919. }
  920.  
  921. resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
  922. {
  923.     return pci_resource_start(dev->pdev, resource);
  924. }
  925.  
  926. resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
  927. {
  928.     return pci_resource_len(dev->pdev, resource);
  929. }
  930.  
  931.  
  932. uint32_t __div64_32(uint64_t *n, uint32_t base)
  933. {
  934.         uint64_t rem = *n;
  935.         uint64_t b = base;
  936.         uint64_t res, d = 1;
  937.         uint32_t high = rem >> 32;
  938.  
  939.         /* Reduce the thing a bit first */
  940.         res = 0;
  941.         if (high >= base) {
  942.                 high /= base;
  943.                 res = (uint64_t) high << 32;
  944.                 rem -= (uint64_t) (high*base) << 32;
  945.         }
  946.  
  947.         while ((int64_t)b > 0 && b < rem) {
  948.                 b = b+b;
  949.                 d = d+d;
  950.         }
  951.  
  952.         do {
  953.                 if (rem >= b) {
  954.                         rem -= b;
  955.                         res += d;
  956.                 }
  957.                 b >>= 1;
  958.                 d >>= 1;
  959.         } while (d);
  960.  
  961.         *n = res;
  962.         return rem;
  963. }
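/*
 * Usage sketch (illustrative): __div64_32 is the slow-path helper behind a
 * do_div()-style 64-by-32 division; it replaces *n with the quotient and
 * returns the remainder. The values below are hypothetical.
 */
#if 0
	uint64_t n   = 0x12345678ULL;
	uint32_t rem = __div64_32(&n, 1024 * 1024);
	/* n now holds the quotient 0x123, rem holds the remainder 0x45678 */
#endif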
  964.  
  965.  
  966. static struct pci_device_id pciidlist[] = {
  967.     radeon_PCI_IDS
  968. };
  969.  
  970.  
  971. #define API_VERSION     0x01000100
  972.  
  973. #define SRV_GETVERSION  0
  974. #define SRV_ENUM_MODES  1
  975. #define SRV_SET_MODE    2
  976.  
  977. int _stdcall display_handler(ioctl_t *io)
  978. {
  979.     int    retval = -1;
  980.     u32_t *inp;
  981.     u32_t *outp;
  982.  
  983.     inp = io->input;
  984.     outp = io->output;
  985.  
  986.     switch(io->io_code)
  987.     {
  988.         case SRV_GETVERSION:
  989.             if(io->out_size==4)
  990.             {
  991.                 *outp  = API_VERSION;
  992.                 retval = 0;
  993.             }
  994.             break;
  995.  
  996.         case SRV_ENUM_MODES:
  997.             dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
  998.                        inp, io->inp_size, io->out_size );
  999.  
  1000.             if( radeon_modeset &&
  1001.                 (outp != NULL) && (io->out_size == 4) &&
  1002.                 (io->inp_size == *outp * sizeof(videomode_t)) )
  1003.             {
  1004.                 retval = get_modes((videomode_t*)inp, outp);
  1005.             };
  1006.             break;
  1007.  
  1008.         case SRV_SET_MODE:
  1009.             dbgprintf("SRV_SET_MODE inp %x inp_size %x\n",
  1010.                        inp, io->inp_size);
  1011.  
  1012.             if(  radeon_modeset   &&
  1013.                 (inp != NULL) &&
  1014.                 (io->inp_size == sizeof(videomode_t)) )
  1015.             {
  1016.                 retval = set_user_mode((videomode_t*)inp);
  1017.             };
  1018.             break;
  1019.     };
  1020.  
  1021.     return retval;
  1022. }
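/*
 * Call sketch (illustrative, not part of the original file): a client of the
 * DISPLAY service fills an ioctl_t and the handler dispatches on io_code.
 * Only the fields that display_handler() actually reads are set here; the
 * local names and the direct call are hypothetical, and real callers go
 * through the registered service rather than calling the handler directly.
 */
#if 0
	videomode_t modes[16];
	u32_t       count = 16;
	ioctl_t     io;

	io.io_code  = SRV_ENUM_MODES;
	io.input    = modes;                 /* buffer the modes are written to */
	io.inp_size = count * sizeof(videomode_t);
	io.output   = &count;                /* capacity on input               */
	io.out_size = 4;

	if (display_handler(&io) == 0)
	{
		/* get_modes() reports the enumerated modes through 'modes'/'count' */
	};
#endif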
  1023.  
  1024. static char  log[256];
  1025. static pci_dev_t device;
  1026.  
  1027. u32_t drvEntry(int action, char *cmdline)
  1028. {
  1029.     struct radeon_device *rdev = NULL;
  1030.  
  1031.     struct pci_device_id  *ent;
  1032.  
  1033.     int     err;
  1034.     u32_t   retval = 0;
  1035.  
  1036.     if(action != 1)
  1037.         return 0;
  1038.  
  1039.     if( GetService("DISPLAY") != 0 )
  1040.         return 0;
  1041.  
  1042.     if( cmdline && *cmdline )
  1043.         parse_cmdline(cmdline, &usermode, log, &radeon_modeset);
  1044.  
  1045.     if(!dbg_open(log))
  1046.     {
  1047.         strcpy(log, "/RD/1/DRIVERS/atikms.log");
  1048.  
  1049.         if(!dbg_open(log))
  1050.         {
  1051.             printf("Can't open %s\nExit\n", log);
  1052.             return 0;
  1053.         };
  1054.     }
  1055.     dbgprintf("Radeon RC11 cmdline %s\n", cmdline);
  1056.  
  1057.     enum_pci_devices();
  1058.  
  1059.     ent = find_pci_device(&device, pciidlist);
  1060.  
  1061.     if( unlikely(ent == NULL) )
  1062.     {
  1063.         dbgprintf("device not found\n");
  1064.         return 0;
  1065.     };
  1066.  
  1067.     dbgprintf("device %x:%x\n", device.pci_dev.vendor,
  1068.                                 device.pci_dev.device);
  1069.  
  1070.     err = drm_get_dev(&device.pci_dev, ent);
  1071.  
  1072.     rdev = rdisplay->ddev->dev_private;
  1073.  
  1074.     err = RegService("DISPLAY", display_handler);
  1075.  
  1076.     if( err != 0)
  1077.         dbgprintf("Set DISPLAY handler\n");
  1078.  
  1079.     return err;
  1080. };
  1081.  
  1082. void drm_vblank_post_modeset(struct drm_device *dev, int crtc)
  1083. {};
  1084.  
  1085. void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
  1086. {};
  1087.  
  1088.