/drivers/video/drm/radeon/Makefile |
---|
33,17 → 33,6 |
NAME:= atikms |
HFILES:= $(DDK_INCLUDES)/linux/types.h \ |
$(DDK_INCLUDES)/linux/list.h \ |
$(DDK_INCLUDES)/linux/pci.h \ |
$(DDK_INCLUDES)/drm/drmP.h \ |
$(DDK_INCLUDES)/drm/drm_edid.h \ |
$(DDK_INCLUDES)/drm/drm_crtc.h \ |
$(DDK_INCLUDES)/drm/drm_mm.h \ |
atom.h \ |
radeon.h \ |
radeon_asic.h |
NAME_SRC= \ |
main.c \ |
pci.c \ |
231,7 → 220,7 |
all: $(NAME).dll |
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(HFILES) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a atikms.lds Makefile |
$(NAME).dll: $(NAME_OBJS) $(FW_BINS) $(SRC_DEP) $(LIBPATH)/libcore.a $(LIBPATH)/libddk.a atikms.lds Makefile |
$(LD) -L$(LIBPATH) $(LDFLAGS) -T atikms.lds -o $@ $(NAME_OBJS) $(LIBS) |
/drivers/video/drm/radeon/atom.c |
---|
66,9 → 66,10 |
static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); |
int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); |
static uint32_t atom_arg_mask[8] = |
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000, |
0xFF000000 }; |
static uint32_t atom_arg_mask[8] = { |
0xFFFFFFFF, 0x0000FFFF, 0x00FFFF00, 0xFFFF0000, |
0x000000FF, 0x0000FF00, 0x00FF0000, 0xFF000000 |
}; |
static int atom_arg_shift[8] = { 0, 0, 8, 16, 0, 8, 16, 24 }; |
static int atom_dst_to_src[8][4] = { |
/drivers/video/drm/radeon/atombios_crtc.c |
---|
275,13 → 275,15 |
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) |
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); |
atombios_blank_crtc(crtc, ATOM_DISABLE); |
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); |
if (dev->num_crtcs > radeon_crtc->crtc_id) |
drm_vblank_on(dev, radeon_crtc->crtc_id); |
radeon_crtc_load_lut(crtc); |
break; |
case DRM_MODE_DPMS_STANDBY: |
case DRM_MODE_DPMS_SUSPEND: |
case DRM_MODE_DPMS_OFF: |
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); |
if (dev->num_crtcs > radeon_crtc->crtc_id) |
drm_vblank_off(dev, radeon_crtc->crtc_id); |
if (radeon_crtc->enabled) |
atombios_blank_crtc(crtc, ATOM_ENABLE); |
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) |
1665,11 → 1667,11 |
} |
int atombios_crtc_set_base_atomic(struct drm_crtc *crtc, |
struct drm_framebuffer *fb, |
struct drm_framebuffer *fb, |
int x, int y, enum mode_set_atomic state) |
{ |
struct drm_device *dev = crtc->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct drm_device *dev = crtc->dev; |
struct radeon_device *rdev = dev->dev_private; |
if (ASIC_IS_DCE4(rdev)) |
return dce4_crtc_do_set_base(crtc, fb, x, y, 1); |
/drivers/video/drm/radeon/atombios_dp.c |
---|
37,10 → 37,10 |
#define DP_DPCD_SIZE DP_RECEIVER_CAP_SIZE |
static char *voltage_names[] = { |
"0.4V", "0.6V", "0.8V", "1.2V" |
"0.4V", "0.6V", "0.8V", "1.2V" |
}; |
static char *pre_emph_names[] = { |
"0dB", "3.5dB", "6dB", "9.5dB" |
"0dB", "3.5dB", "6dB", "9.5dB" |
}; |
/***** radeon AUX functions *****/ |
305,7 → 305,7 |
/***** radeon specific DP functions *****/ |
int radeon_dp_get_dp_link_config(struct drm_connector *connector, |
const u8 dpcd[DP_DPCD_SIZE], |
const u8 dpcd[DP_DPCD_SIZE], |
unsigned pix_clock, |
unsigned *dp_lanes, unsigned *dp_rate) |
{ |
317,7 → 317,7 |
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == |
ENCODER_OBJECT_ID_NUTMEG) { |
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { |
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { |
max_pix_clock = (lane_num * 270000 * 8) / bpp; |
if (max_pix_clock >= pix_clock) { |
*dp_lanes = lane_num; |
328,15 → 328,15 |
} else { |
for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) { |
for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) { |
max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; |
if (max_pix_clock >= pix_clock) { |
*dp_lanes = lane_num; |
*dp_rate = link_rates[i]; |
return 0; |
max_pix_clock = (lane_num * link_rates[i] * 8) / bpp; |
if (max_pix_clock >= pix_clock) { |
*dp_lanes = lane_num; |
*dp_rate = link_rates[i]; |
return 0; |
} |
} |
} |
} |
} |
} |
} |
return -EINVAL; |
} |
/drivers/video/drm/radeon/atombios_encoders.c |
---|
2629,16 → 2629,8 |
} |
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder, |
const struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
return true; |
} |
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = { |
.dpms = radeon_atom_ext_dpms, |
.mode_fixup = radeon_atom_ext_mode_fixup, |
.prepare = radeon_atom_ext_prepare, |
.mode_set = radeon_atom_ext_mode_set, |
.commit = radeon_atom_ext_commit, |
/drivers/video/drm/radeon/btc_dpm.c |
---|
1163,12 → 1163,11 |
155000, 160000, 165000, 170000, 175000, 180000, 185000, 190000, 195000, 200000 |
}; |
static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = |
{ |
{ 10000, 30000, RADEON_SCLK_UP }, |
{ 15000, 30000, RADEON_SCLK_UP }, |
{ 20000, 30000, RADEON_SCLK_UP }, |
{ 25000, 30000, RADEON_SCLK_UP } |
static const struct radeon_blacklist_clocks btc_blacklist_clocks[] = { |
{ 10000, 30000, RADEON_SCLK_UP }, |
{ 15000, 30000, RADEON_SCLK_UP }, |
{ 20000, 30000, RADEON_SCLK_UP }, |
{ 25000, 30000, RADEON_SCLK_UP } |
}; |
void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table, |
1637,14 → 1636,14 |
cypress_populate_smc_voltage_tables(rdev, table); |
switch (rdev->pm.int_thermal_type) { |
case THERMAL_TYPE_EVERGREEN: |
case THERMAL_TYPE_EMC2103_WITH_INTERNAL: |
case THERMAL_TYPE_EVERGREEN: |
case THERMAL_TYPE_EMC2103_WITH_INTERNAL: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; |
break; |
case THERMAL_TYPE_NONE: |
case THERMAL_TYPE_NONE: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; |
break; |
default: |
default: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; |
break; |
} |
1860,37 → 1859,37 |
case MC_SEQ_RAS_TIMING >> 2: |
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2; |
break; |
case MC_SEQ_CAS_TIMING >> 2: |
case MC_SEQ_CAS_TIMING >> 2: |
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2; |
break; |
case MC_SEQ_MISC_TIMING >> 2: |
case MC_SEQ_MISC_TIMING >> 2: |
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2; |
break; |
case MC_SEQ_MISC_TIMING2 >> 2: |
case MC_SEQ_MISC_TIMING2 >> 2: |
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; |
break; |
case MC_SEQ_RD_CTL_D0 >> 2: |
case MC_SEQ_RD_CTL_D0 >> 2: |
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; |
break; |
case MC_SEQ_RD_CTL_D1 >> 2: |
case MC_SEQ_RD_CTL_D1 >> 2: |
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; |
break; |
case MC_SEQ_WR_CTL_D0 >> 2: |
case MC_SEQ_WR_CTL_D0 >> 2: |
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; |
break; |
case MC_SEQ_WR_CTL_D1 >> 2: |
case MC_SEQ_WR_CTL_D1 >> 2: |
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; |
break; |
case MC_PMG_CMD_EMRS >> 2: |
case MC_PMG_CMD_EMRS >> 2: |
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; |
break; |
case MC_PMG_CMD_MRS >> 2: |
case MC_PMG_CMD_MRS >> 2: |
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; |
break; |
case MC_PMG_CMD_MRS1 >> 2: |
case MC_PMG_CMD_MRS1 >> 2: |
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; |
break; |
default: |
default: |
result = false; |
break; |
} |
/drivers/video/drm/radeon/ci_dpm.c |
---|
192,9 → 192,9 |
static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) |
{ |
struct ci_power_info *pi = rdev->pm.dpm.priv; |
struct ci_power_info *pi = rdev->pm.dpm.priv; |
return pi; |
return pi; |
} |
static struct ci_ps *ci_get_ps(struct radeon_ps *rps) |
1632,7 → 1632,7 |
else |
power_limit = (u32)(cac_tdp_table->battery_power_limit * 256); |
ci_set_power_limit(rdev, power_limit); |
ci_set_power_limit(rdev, power_limit); |
if (pi->caps_automatic_dc_transition) { |
if (ac_power) |
2017,9 → 2017,9 |
{ |
u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL); |
tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK); |
tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) | |
DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK)); |
tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK); |
tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) | |
DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK)); |
WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp); |
} |
2938,8 → 2938,8 |
memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE); |
memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases); |
memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); |
memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); |
memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE); |
memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE); |
memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency); |
memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel); |
3152,7 → 3152,7 |
spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; |
spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); |
spll_func_cntl_3 |= SPLL_DITHEN; |
spll_func_cntl_3 |= SPLL_DITHEN; |
if (pi->caps_sclk_ss_support) { |
struct radeon_atom_ss ss; |
3229,7 → 3229,7 |
graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; |
graphic_level->Flags = cpu_to_be32(graphic_level->Flags); |
graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); |
graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE); |
graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases); |
graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency); |
graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel); |
4393,7 → 4393,7 |
break; |
case MC_SEQ_CAS_TIMING >> 2: |
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2; |
break; |
break; |
case MC_SEQ_MISC_TIMING >> 2: |
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2; |
break; |
4625,7 → 4625,7 |
if (ret) |
goto init_mc_done; |
ret = ci_copy_vbios_mc_reg_table(table, ci_table); |
ret = ci_copy_vbios_mc_reg_table(table, ci_table); |
if (ret) |
goto init_mc_done; |
4916,7 → 4916,7 |
allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk; |
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = |
allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v; |
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = |
rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = |
allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v; |
return 0; |
5517,7 → 5517,7 |
struct _NonClockInfoArray *non_clock_info_array; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
u8 *power_state_offset; |
struct ci_ps *ps; |
5693,8 → 5693,8 |
return ret; |
} |
pi->dll_default_on = false; |
pi->sram_end = SMC_RAM_END; |
pi->dll_default_on = false; |
pi->sram_end = SMC_RAM_END; |
pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT; |
pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT; |
5734,9 → 5734,9 |
pi->caps_uvd_dpm = true; |
pi->caps_vce_dpm = true; |
ci_get_leakage_voltages(rdev); |
ci_patch_dependency_tables_with_leakage(rdev); |
ci_set_private_data_variables_based_on_pptable(rdev); |
ci_get_leakage_voltages(rdev); |
ci_patch_dependency_tables_with_leakage(rdev); |
ci_set_private_data_variables_based_on_pptable(rdev); |
rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = |
kzalloc(4 * sizeof(struct radeon_clock_voltage_dependency_entry), GFP_KERNEL); |
5839,7 → 5839,7 |
pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2; |
else |
rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL; |
} |
} |
if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) { |
if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT)) |
5860,7 → 5860,7 |
#endif |
if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, |
&frev, &crev, &data_offset)) { |
&frev, &crev, &data_offset)) { |
pi->caps_sclk_ss_support = true; |
pi->caps_mclk_ss_support = true; |
pi->dynamic_ss = true; |
/drivers/video/drm/radeon/ci_smc.c |
---|
194,11 → 194,11 |
return PPSMC_Result_OK; |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); |
if ((tmp & CKEN) == 0) |
tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); |
if ((tmp & CKEN) == 0) |
break; |
udelay(1); |
} |
udelay(1); |
} |
return PPSMC_Result_OK; |
} |
/drivers/video/drm/radeon/cik.c |
---|
1712,7 → 1712,7 |
*/ |
u32 cik_get_xclk(struct radeon_device *rdev) |
{ |
u32 reference_clock = rdev->clock.spll.reference_freq; |
u32 reference_clock = rdev->clock.spll.reference_freq; |
if (rdev->flags & RADEON_IS_IGP) { |
if (RREG32_SMC(GENERAL_PWRMGT) & GPU_COUNTER_CLK) |
2343,9 → 2343,13 |
*/ |
static void cik_tiling_mode_table_init(struct radeon_device *rdev) |
{ |
const u32 num_tile_mode_states = 32; |
const u32 num_secondary_tile_mode_states = 16; |
u32 reg_offset, gb_tile_moden, split_equal_to_row_size; |
u32 *tile = rdev->config.cik.tile_mode_array; |
u32 *macrotile = rdev->config.cik.macrotile_mode_array; |
const u32 num_tile_mode_states = |
ARRAY_SIZE(rdev->config.cik.tile_mode_array); |
const u32 num_secondary_tile_mode_states = |
ARRAY_SIZE(rdev->config.cik.macrotile_mode_array); |
u32 reg_offset, split_equal_to_row_size; |
u32 num_pipe_configs; |
u32 num_rbs = rdev->config.cik.max_backends_per_se * |
rdev->config.cik.max_shader_engines; |
2367,1032 → 2371,669 |
if (num_pipe_configs > 8) |
num_pipe_configs = 16; |
if (num_pipe_configs == 16) { |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
break; |
case 1: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
break; |
case 2: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 3: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
break; |
case 4: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 5: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
break; |
case 6: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 7: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 8: |
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); |
break; |
case 9: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); |
break; |
case 10: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 11: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 12: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 13: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
break; |
case 14: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 16: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 17: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 27: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); |
break; |
case 28: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 29: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 30: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 1: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 2: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 3: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 4: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
break; |
case 5: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
break; |
case 6: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
break; |
case 8: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 9: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 10: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 11: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
break; |
case 12: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
break; |
case 13: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
break; |
case 14: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
} else if (num_pipe_configs == 8) { |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
break; |
case 1: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
break; |
case 2: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 3: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
break; |
case 4: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 5: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
break; |
case 6: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 7: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 8: |
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); |
break; |
case 9: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); |
break; |
case 10: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 11: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 12: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 13: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
break; |
case 14: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 16: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 17: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 27: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); |
break; |
case 28: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 29: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 30: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 1: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 2: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 3: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 4: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
break; |
case 5: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
break; |
case 6: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
break; |
case 8: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 9: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 10: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 11: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 12: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
break; |
case 13: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
break; |
case 14: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
} else if (num_pipe_configs == 4) { |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
tile[reg_offset] = 0; |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) |
macrotile[reg_offset] = 0; |
switch(num_pipe_configs) { |
case 16: |
tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16)); |
tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); |
tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); |
tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]); |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) |
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]); |
break; |
case 8: |
tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16)); |
tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); |
tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); |
tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_2_BANK)); |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]); |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) |
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]); |
break; |
case 4: |
if (num_rbs == 4) { |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
break; |
case 1: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
break; |
case 2: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 3: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
break; |
case 4: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 5: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
break; |
case 6: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 7: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 8: |
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16)); |
break; |
case 9: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); |
break; |
case 10: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 11: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 12: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 13: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
break; |
case 14: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 16: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 17: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 27: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); |
break; |
case 28: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 29: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 30: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16)); |
tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); |
tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); |
tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_16x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
} else if (num_rbs < 4) { |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
break; |
case 1: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
break; |
case 2: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 3: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
break; |
case 4: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 5: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
break; |
case 6: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 7: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 8: |
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16)); |
break; |
case 9: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); |
break; |
case 10: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 11: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 12: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 13: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
break; |
case 14: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 16: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 17: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 27: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); |
break; |
case 28: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 29: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 30: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16)); |
tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING)); |
tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING)); |
tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
} |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 1: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 2: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 3: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 4: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 5: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
break; |
case 6: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
break; |
case 8: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 9: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 10: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 11: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 12: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 13: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
break; |
case 14: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
} else if (num_pipe_configs == 2) { |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
break; |
case 1: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
break; |
case 2: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 3: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
break; |
case 4: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 5: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
break; |
case 6: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
break; |
case 7: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(split_equal_to_row_size)); |
break; |
case 8: |
gb_tile_moden = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P2); |
break; |
case 9: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2)); |
break; |
case 10: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 11: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 12: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 13: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
break; |
case 14: |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 16: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 17: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 27: |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2)); |
break; |
case 28: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 29: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
case 30: |
gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.tile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 1: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 2: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 3: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 4: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 5: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 6: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
break; |
case 8: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 9: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 10: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 11: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 12: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 13: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
break; |
case 14: |
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
} else |
macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) | |
NUM_BANKS(ADDR_SURF_4_BANK)); |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]); |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) |
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]); |
break; |
case 2: |
tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B)); |
tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B)); |
tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B)); |
tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING)); |
tile[6] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B)); |
tile[7] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
TILE_SPLIT(split_equal_to_row_size)); |
tile[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
PIPE_CONFIG(ADDR_SURF_P2); |
tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2)); |
tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING)); |
tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[17] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2)); |
tile[28] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
tile[30] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) | |
MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P2) | |
SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2)); |
macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) | |
NUM_BANKS(ADDR_SURF_16_BANK)); |
macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) | |
NUM_BANKS(ADDR_SURF_8_BANK)); |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]); |
for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) |
WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), macrotile[reg_offset]); |
break; |
default: |
DRM_ERROR("unknown num pipe config: 0x%x\n", num_pipe_configs); |
} |
} |
/** |
4219,13 → 3860,20 |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
return r; |
} |
r = radeon_fence_wait(ib.fence, false); |
if (r) { |
r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( |
RADEON_USEC_IB_TEST_TIMEOUT)); |
if (r < 0) { |
DRM_ERROR("radeon: fence wait failed (%d).\n", r); |
radeon_scratch_free(rdev, scratch); |
radeon_ib_free(rdev, &ib); |
return r; |
} else if (r == 0) { |
DRM_ERROR("radeon: fence wait timed out.\n"); |
radeon_scratch_free(rdev, scratch); |
radeon_ib_free(rdev, &ib); |
return -ETIMEDOUT; |
} |
r = 0; |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32(scratch); |
if (tmp == 0xDEADBEEF) |
9625,13 → 9273,13 |
mutex_lock(&rdev->gpu_clock_mutex); |
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); |
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); |
mutex_unlock(&rdev->gpu_clock_mutex); |
return clock; |
} |
static int cik_set_uvd_clock(struct radeon_device *rdev, u32 clock, |
u32 cntl_reg, u32 status_reg) |
u32 cntl_reg, u32 status_reg) |
{ |
int r, i; |
struct atom_clock_dividers dividers; |
/drivers/video/drm/radeon/cik_sdma.c |
---|
737,11 → 737,16 |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
return r; |
} |
r = radeon_fence_wait(ib.fence, false); |
if (r) { |
r = radeon_fence_wait_timeout(ib.fence, false, usecs_to_jiffies( |
RADEON_USEC_IB_TEST_TIMEOUT)); |
if (r < 0) { |
DRM_ERROR("radeon: fence wait failed (%d).\n", r); |
return r; |
} else if (r == 0) { |
DRM_ERROR("radeon: fence wait timed out.\n"); |
return -ETIMEDOUT; |
} |
r = 0; |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = le32_to_cpu(rdev->wb.wb[index/4]); |
if (tmp == 0xDEADBEEF) |
/drivers/video/drm/radeon/cypress_dpm.c |
---|
1620,14 → 1620,14 |
cypress_populate_smc_voltage_tables(rdev, table); |
switch (rdev->pm.int_thermal_type) { |
case THERMAL_TYPE_EVERGREEN: |
case THERMAL_TYPE_EMC2103_WITH_INTERNAL: |
case THERMAL_TYPE_EVERGREEN: |
case THERMAL_TYPE_EMC2103_WITH_INTERNAL: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; |
break; |
case THERMAL_TYPE_NONE: |
case THERMAL_TYPE_NONE: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; |
break; |
default: |
default: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; |
break; |
} |
/drivers/video/drm/radeon/evergreen.c |
---|
1140,7 → 1140,7 |
int r, i; |
struct atom_clock_dividers dividers; |
r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
clock, false, ÷rs); |
if (r) |
return r; |
/drivers/video/drm/radeon/evergreen_cs.c |
---|
1816,8 → 1816,8 |
} |
offset = reloc->gpu_offset + |
(idx_value & 0xfffffff0) + |
((u64)(tmp & 0xff) << 32); |
(idx_value & 0xfffffff0) + |
((u64)(tmp & 0xff) << 32); |
ib[idx + 0] = offset; |
ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); |
1862,8 → 1862,8 |
} |
offset = reloc->gpu_offset + |
idx_value + |
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
idx_value + |
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
ib[idx+0] = offset; |
ib[idx+1] = upper_32_bits(offset) & 0xff; |
1897,8 → 1897,8 |
} |
offset = reloc->gpu_offset + |
idx_value + |
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
idx_value + |
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
ib[idx+0] = offset; |
ib[idx+1] = upper_32_bits(offset) & 0xff; |
1925,8 → 1925,8 |
} |
offset = reloc->gpu_offset + |
radeon_get_ib_value(p, idx+1) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
radeon_get_ib_value(p, idx+1) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
ib[idx+1] = offset; |
ib[idx+2] = upper_32_bits(offset) & 0xff; |
2098,8 → 2098,8 |
} |
offset = reloc->gpu_offset + |
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc); |
ib[idx+2] = upper_32_bits(offset) & 0xff; |
2239,8 → 2239,8 |
return -EINVAL; |
} |
offset = reloc->gpu_offset + |
(radeon_get_ib_value(p, idx+1) & 0xfffffff8) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
(radeon_get_ib_value(p, idx+1) & 0xfffffff8) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
ib[idx+1] = offset & 0xfffffff8; |
ib[idx+2] = upper_32_bits(offset) & 0xff; |
2261,8 → 2261,8 |
} |
offset = reloc->gpu_offset + |
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
ib[idx+1] = offset & 0xfffffffc; |
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); |
2283,8 → 2283,8 |
} |
offset = reloc->gpu_offset + |
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
ib[idx+1] = offset & 0xfffffffc; |
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); |
/drivers/video/drm/radeon/evergreen_hdmi.c |
---|
206,7 → 206,7 |
* build a AVI Info Frame |
*/ |
void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset, |
unsigned char *buffer, size_t size) |
unsigned char *buffer, size_t size) |
{ |
uint8_t *frame = buffer + 3; |
/drivers/video/drm/radeon/kv_dpm.c |
---|
2640,7 → 2640,7 |
struct _NonClockInfoArray *non_clock_info_array; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
u8 *power_state_offset; |
struct kv_ps *ps; |
2738,7 → 2738,7 |
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) |
pi->at[i] = TRINITY_AT_DFLT; |
pi->sram_end = SMC_RAM_END; |
pi->sram_end = SMC_RAM_END; |
/* Enabling nb dpm on an asrock system prevents dpm from working */ |
if (rdev->pdev->subsystem_vendor == 0x1849) |
/drivers/video/drm/radeon/main.c |
---|
5,7 → 5,7 |
#include "radeon.h" |
#include "bitmap.h" |
#define DRV_NAME "atikms v4.5.7" |
#define DRV_NAME "atikms v4.6.7" |
void __init dmi_scan_machine(void); |
int printf ( const char * format, ... ); |
/drivers/video/drm/radeon/ni.c |
---|
1257,7 → 1257,7 |
tmp = RREG32_CG(CG_CGTT_LOCAL_0); |
tmp &= ~0x00380000; |
WREG32_CG(CG_CGTT_LOCAL_0, tmp); |
tmp = RREG32_CG(CG_CGTT_LOCAL_1); |
tmp = RREG32_CG(CG_CGTT_LOCAL_1); |
tmp &= ~0x0e000000; |
WREG32_CG(CG_CGTT_LOCAL_1, tmp); |
} |
2593,7 → 2593,7 |
struct atom_clock_dividers dividers; |
int r, i; |
r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
r = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
ecclk, false, ÷rs); |
if (r) |
return r; |
/drivers/video/drm/radeon/ni_dpm.c |
---|
725,9 → 725,9 |
struct ni_power_info *ni_get_pi(struct radeon_device *rdev) |
{ |
struct ni_power_info *pi = rdev->pm.dpm.priv; |
struct ni_power_info *pi = rdev->pm.dpm.priv; |
return pi; |
return pi; |
} |
struct ni_ps *ni_get_ps(struct radeon_ps *rps) |
1096,9 → 1096,9 |
static int ni_process_firmware_header(struct radeon_device *rdev) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
u32 tmp; |
int ret; |
1202,14 → 1202,14 |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
if (pi->gfx_clock_gating) { |
WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); |
WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN); |
WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON); |
WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON); |
WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON); |
RREG32(GB_ADDR_CONFIG); |
} |
} |
WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower), |
~HOST_SMC_MSG_MASK); |
~HOST_SMC_MSG_MASK); |
udelay(25000); |
1321,12 → 1321,12 |
u32 mclk, |
NISLANDS_SMC_VOLTAGE_VALUE *voltage) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
if (!pi->mvdd_control) { |
voltage->index = eg_pi->mvdd_high_index; |
voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); |
voltage->value = cpu_to_be16(MVDD_HIGH_VALUE); |
return; |
} |
1510,47 → 1510,47 |
u32 mc_cg_config; |
switch (arb_freq_src) { |
case MC_CG_ARB_FREQ_F0: |
case MC_CG_ARB_FREQ_F0: |
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); |
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); |
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; |
break; |
case MC_CG_ARB_FREQ_F1: |
case MC_CG_ARB_FREQ_F1: |
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); |
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); |
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; |
break; |
case MC_CG_ARB_FREQ_F2: |
case MC_CG_ARB_FREQ_F2: |
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); |
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); |
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; |
break; |
case MC_CG_ARB_FREQ_F3: |
case MC_CG_ARB_FREQ_F3: |
mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); |
mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); |
burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; |
break; |
default: |
default: |
return -EINVAL; |
} |
switch (arb_freq_dest) { |
case MC_CG_ARB_FREQ_F0: |
case MC_CG_ARB_FREQ_F0: |
WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); |
WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); |
WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); |
break; |
case MC_CG_ARB_FREQ_F1: |
case MC_CG_ARB_FREQ_F1: |
WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); |
WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); |
WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); |
break; |
case MC_CG_ARB_FREQ_F2: |
case MC_CG_ARB_FREQ_F2: |
WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); |
WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); |
WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); |
break; |
case MC_CG_ARB_FREQ_F3: |
case MC_CG_ARB_FREQ_F3: |
WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); |
WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); |
WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); |
1621,9 → 1621,7 |
(u8)rv770_calculate_memory_refresh_rate(rdev, pl->sclk); |
radeon_atom_set_engine_dram_timings(rdev, |
pl->sclk, |
pl->mclk); |
radeon_atom_set_engine_dram_timings(rdev, pl->sclk, pl->mclk); |
dram_timing = RREG32(MC_ARB_DRAM_TIMING); |
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); |
1867,9 → 1865,9 |
mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN; |
if (pi->mem_gddr5) |
mpll_dq_func_cntl &= ~PDNB; |
mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS; |
if (pi->mem_gddr5) |
mpll_dq_func_cntl &= ~PDNB; |
mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN | BYPASS; |
mclk_pwrmgt_cntl |= (MRDCKA0_RESET | |
1891,15 → 1889,15 |
MRDCKD1_PDNB); |
dll_cntl |= (MRDCKA0_BYPASS | |
MRDCKA1_BYPASS | |
MRDCKB0_BYPASS | |
MRDCKB1_BYPASS | |
MRDCKC0_BYPASS | |
MRDCKC1_BYPASS | |
MRDCKD0_BYPASS | |
MRDCKD1_BYPASS); |
MRDCKA1_BYPASS | |
MRDCKB0_BYPASS | |
MRDCKB1_BYPASS | |
MRDCKC0_BYPASS | |
MRDCKC1_BYPASS | |
MRDCKD0_BYPASS | |
MRDCKD1_BYPASS); |
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; |
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; |
spll_func_cntl_2 |= SCLK_MUX_SEL(4); |
table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); |
2089,7 → 2087,7 |
static int ni_init_smc_spll_table(struct radeon_device *rdev) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
SMC_NISLANDS_SPLL_DIV_TABLE *spll_table; |
NISLANDS_SMC_SCLK_VALUE sclk_params; |
2311,8 → 2309,8 |
NISLANDS_SMC_HW_PERFORMANCE_LEVEL *level) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
int ret; |
bool dll_state_on; |
u16 std_vddc; |
2391,8 → 2389,8 |
struct radeon_ps *radeon_state, |
NISLANDS_SMC_SWSTATE *smc_state) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_ps *state = ni_get_ps(radeon_state); |
u32 a_t; |
u32 t_l, t_h; |
2451,8 → 2449,8 |
struct radeon_ps *radeon_state, |
NISLANDS_SMC_SWSTATE *smc_state) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_ps *state = ni_get_ps(radeon_state); |
u32 prev_sclk; |
2595,7 → 2593,7 |
struct radeon_ps *radeon_new_state, |
bool enable) |
{ |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
PPSMC_Result smc_result; |
int ret = 0; |
2625,7 → 2623,7 |
struct radeon_ps *radeon_state, |
NISLANDS_SMC_SWSTATE *smc_state) |
{ |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_ps *state = ni_get_ps(radeon_state); |
int i, ret; |
2770,46 → 2768,46 |
bool result = true; |
switch (in_reg) { |
case MC_SEQ_RAS_TIMING >> 2: |
case MC_SEQ_RAS_TIMING >> 2: |
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2; |
break; |
case MC_SEQ_CAS_TIMING >> 2: |
case MC_SEQ_CAS_TIMING >> 2: |
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2; |
break; |
case MC_SEQ_MISC_TIMING >> 2: |
case MC_SEQ_MISC_TIMING >> 2: |
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2; |
break; |
case MC_SEQ_MISC_TIMING2 >> 2: |
case MC_SEQ_MISC_TIMING2 >> 2: |
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; |
break; |
case MC_SEQ_RD_CTL_D0 >> 2: |
case MC_SEQ_RD_CTL_D0 >> 2: |
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; |
break; |
case MC_SEQ_RD_CTL_D1 >> 2: |
case MC_SEQ_RD_CTL_D1 >> 2: |
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; |
break; |
case MC_SEQ_WR_CTL_D0 >> 2: |
case MC_SEQ_WR_CTL_D0 >> 2: |
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; |
break; |
case MC_SEQ_WR_CTL_D1 >> 2: |
case MC_SEQ_WR_CTL_D1 >> 2: |
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; |
break; |
case MC_PMG_CMD_EMRS >> 2: |
case MC_PMG_CMD_EMRS >> 2: |
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; |
break; |
case MC_PMG_CMD_MRS >> 2: |
case MC_PMG_CMD_MRS >> 2: |
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; |
break; |
case MC_PMG_CMD_MRS1 >> 2: |
case MC_PMG_CMD_MRS1 >> 2: |
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; |
break; |
case MC_SEQ_PMG_TIMING >> 2: |
case MC_SEQ_PMG_TIMING >> 2: |
*out_reg = MC_SEQ_PMG_TIMING_LP >> 2; |
break; |
case MC_PMG_CMD_MRS2 >> 2: |
case MC_PMG_CMD_MRS2 >> 2: |
*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2; |
break; |
default: |
default: |
result = false; |
break; |
} |
2876,9 → 2874,9 |
struct ni_mc_reg_table *ni_table = &ni_pi->mc_reg_table; |
u8 module_index = rv770_get_memory_module_index(rdev); |
table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); |
if (!table) |
return -ENOMEM; |
table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); |
if (!table) |
return -ENOMEM; |
WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); |
WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); |
2896,25 → 2894,25 |
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); |
if (ret) |
goto init_mc_done; |
if (ret) |
goto init_mc_done; |
ret = ni_copy_vbios_mc_reg_table(table, ni_table); |
if (ret) |
goto init_mc_done; |
if (ret) |
goto init_mc_done; |
ni_set_s0_mc_reg_index(ni_table); |
ret = ni_set_mc_special_registers(rdev, ni_table); |
if (ret) |
goto init_mc_done; |
if (ret) |
goto init_mc_done; |
ni_set_valid_flag(ni_table); |
init_mc_done: |
kfree(table); |
kfree(table); |
return ret; |
} |
2994,7 → 2992,7 |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_ps *boot_state = ni_get_ps(radeon_boot_state); |
SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table; |
3025,7 → 3023,7 |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_ps *ni_new_state = ni_get_ps(radeon_new_state); |
SMC_NIslands_MCRegisters *mc_reg_table = &ni_pi->smc_mc_reg_table; |
u16 address; |
3142,7 → 3140,7 |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
PP_NIslands_CACTABLES *cac_tables = NULL; |
int i, ret; |
u32 reg; |
u32 reg; |
if (ni_pi->enable_cac == false) |
return 0; |
3422,13 → 3420,13 |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
if ((perf_req == PCIE_PERF_REQ_PECI_GEN1) || |
(perf_req == PCIE_PERF_REQ_PECI_GEN2)) { |
(perf_req == PCIE_PERF_REQ_PECI_GEN2)) { |
if (eg_pi->pcie_performance_request_registered == false) |
radeon_acpi_pcie_notify_device_ready(rdev); |
eg_pi->pcie_performance_request_registered = true; |
return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise); |
} else if ((perf_req == PCIE_PERF_REQ_REMOVE_REGISTRY) && |
eg_pi->pcie_performance_request_registered) { |
eg_pi->pcie_performance_request_registered) { |
eg_pi->pcie_performance_request_registered = false; |
return radeon_acpi_pcie_performance_request(rdev, perf_req, advertise); |
} |
3441,12 → 3439,12 |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
u32 tmp; |
tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); |
tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); |
if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) && |
(tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) |
pi->pcie_gen2 = true; |
else |
if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) && |
(tmp & LC_OTHER_SIDE_SUPPORTS_GEN2)) |
pi->pcie_gen2 = true; |
else |
pi->pcie_gen2 = false; |
if (!pi->pcie_gen2) |
3458,8 → 3456,8 |
static void ni_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev, |
bool enable) |
{ |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
u32 tmp, bif; |
struct rv7xx_power_info *pi = rv770_get_pi(rdev); |
u32 tmp, bif; |
tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL); |
3502,7 → 3500,7 |
if (enable) |
WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE); |
else |
WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE); |
WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE); |
} |
void ni_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, |
3563,7 → 3561,7 |
{ |
struct ni_ps *new_ps = ni_get_ps(rps); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
eg_pi->current_rps = *rps; |
ni_pi->current_ps = *new_ps; |
3575,7 → 3573,7 |
{ |
struct ni_ps *new_ps = ni_get_ps(rps); |
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
struct ni_power_info *ni_pi = ni_get_pi(rdev); |
eg_pi->requested_rps = *rps; |
ni_pi->requested_ps = *new_ps; |
3591,8 → 3589,8 |
if (pi->gfx_clock_gating) |
ni_cg_clockgating_default(rdev); |
if (btc_dpm_enabled(rdev)) |
return -EINVAL; |
if (btc_dpm_enabled(rdev)) |
return -EINVAL; |
if (pi->mg_clock_gating) |
ni_mg_clockgating_default(rdev); |
if (eg_pi->ls_clock_gating) |
3991,7 → 3989,7 |
union pplib_clock_info *clock_info; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
struct ni_ps *ps; |
/drivers/video/drm/radeon/ni_reg.h |
---|
109,6 → 109,8 |
#define NI_DP_MSE_SAT2 0x7398 |
#define NI_DP_MSE_SAT_UPDATE 0x739c |
# define NI_DP_MSE_SAT_UPDATE_MASK 0x3 |
# define NI_DP_MSE_16_MTP_KEEPOUT 0x100 |
#define NI_DIG_BE_CNTL 0x7140 |
# define NI_DIG_FE_SOURCE_SELECT(x) (((x) & 0x7f) << 8) |
/drivers/video/drm/radeon/pci.c |
---|
17,6 → 17,9 |
#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) |
#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */ |
#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */ |
/* |
* Translate the low bits of the PCI base |
* to the resource type |
/drivers/video/drm/radeon/r300.c |
---|
1547,6 → 1547,7 |
rv370_pcie_gart_fini(rdev); |
if (rdev->flags & RADEON_IS_PCI) |
r100_pci_gart_fini(rdev); |
radeon_agp_fini(rdev); |
rdev->accel_working = false; |
} |
return 0; |
/drivers/video/drm/radeon/r420.c |
---|
416,6 → 416,7 |
rv370_pcie_gart_fini(rdev); |
if (rdev->flags & RADEON_IS_PCI) |
r100_pci_gart_fini(rdev); |
radeon_agp_fini(rdev); |
rdev->accel_working = false; |
} |
return 0; |
/drivers/video/drm/radeon/r520.c |
---|
297,6 → 297,7 |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
rv370_pcie_gart_fini(rdev); |
radeon_agp_fini(rdev); |
rdev->accel_working = false; |
} |
return 0; |
/drivers/video/drm/radeon/r600.c |
---|
235,8 → 235,8 |
fb_div |= 1; |
r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL); |
if (r) |
return r; |
if (r) |
return r; |
/* assert PLL_RESET */ |
WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK); |
1490,7 → 1490,7 |
rdev->fastfb_working = true; |
} |
} |
} |
} |
} |
radeon_update_bandwidth_info(rdev); |
4521,7 → 4521,7 |
mutex_lock(&rdev->gpu_clock_mutex); |
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); |
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); |
mutex_unlock(&rdev->gpu_clock_mutex); |
return clock; |
} |
/drivers/video/drm/radeon/r600_cs.c |
---|
1671,8 → 1671,8 |
} |
offset = reloc->gpu_offset + |
(idx_value & 0xfffffff0) + |
((u64)(tmp & 0xff) << 32); |
(idx_value & 0xfffffff0) + |
((u64)(tmp & 0xff) << 32); |
ib[idx + 0] = offset; |
ib[idx + 1] = (tmp & 0xffffff00) | (upper_32_bits(offset) & 0xff); |
1712,8 → 1712,8 |
} |
offset = reloc->gpu_offset + |
idx_value + |
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
idx_value + |
((u64)(radeon_get_ib_value(p, idx+1) & 0xff) << 32); |
ib[idx+0] = offset; |
ib[idx+1] = upper_32_bits(offset) & 0xff; |
1764,8 → 1764,8 |
} |
offset = reloc->gpu_offset + |
(radeon_get_ib_value(p, idx+1) & 0xfffffff0) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
(radeon_get_ib_value(p, idx+1) & 0xfffffff0) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffff0); |
ib[idx+2] = upper_32_bits(offset) & 0xff; |
1876,8 → 1876,8 |
return -EINVAL; |
} |
offset = reloc->gpu_offset + |
(radeon_get_ib_value(p, idx+1) & 0xfffffff8) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
(radeon_get_ib_value(p, idx+1) & 0xfffffff8) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
ib[idx+1] = offset & 0xfffffff8; |
ib[idx+2] = upper_32_bits(offset) & 0xff; |
1898,8 → 1898,8 |
} |
offset = reloc->gpu_offset + |
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
(radeon_get_ib_value(p, idx+1) & 0xfffffffc) + |
((u64)(radeon_get_ib_value(p, idx+2) & 0xff) << 32); |
ib[idx+1] = offset & 0xfffffffc; |
ib[idx+2] = (ib[idx+2] & 0xffffff00) | (upper_32_bits(offset) & 0xff); |
/drivers/video/drm/radeon/r600_dpm.c |
---|
844,7 → 844,7 |
struct radeon_mode_info *mode_info = &rdev->mode_info; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
874,7 → 874,7 |
union fan_info *fan_info; |
ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
int ret, i; |
1070,7 → 1070,7 |
ext_hdr->usVCETableOffset) { |
VCEClockInfoArray *array = (VCEClockInfoArray *) |
(mode_info->atom_context->bios + data_offset + |
le16_to_cpu(ext_hdr->usVCETableOffset) + 1); |
le16_to_cpu(ext_hdr->usVCETableOffset) + 1); |
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = |
(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) |
(mode_info->atom_context->bios + data_offset + |
/drivers/video/drm/radeon/r600_hdmi.c |
---|
215,7 → 215,7 |
* build a HDMI Video Info Frame |
*/ |
void r600_set_avi_packet(struct radeon_device *rdev, u32 offset, |
unsigned char *buffer, size_t size) |
unsigned char *buffer, size_t size) |
{ |
uint8_t *frame = buffer + 3; |
312,7 → 312,7 |
} |
void r600_hdmi_audio_set_dto(struct radeon_device *rdev, |
struct radeon_crtc *crtc, unsigned int clock) |
struct radeon_crtc *crtc, unsigned int clock) |
{ |
struct radeon_encoder *radeon_encoder; |
struct radeon_encoder_atom_dig *dig; |
/drivers/video/drm/radeon/radeon.h |
---|
126,6 → 126,7 |
*/ |
#define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */ |
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) |
#define RADEON_USEC_IB_TEST_TIMEOUT 1000000 /* 1s */ |
/* RADEON_IB_POOL_SIZE must be a power of 2 */ |
#define RADEON_IB_POOL_SIZE 16 |
#define RADEON_DEBUGFS_MAX_COMPONENTS 32 |
388,6 → 389,7 |
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); |
void radeon_fence_process(struct radeon_device *rdev, int ring); |
bool radeon_fence_signaled(struct radeon_fence *fence); |
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool interruptible, long timeout); |
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); |
int radeon_fence_wait_next(struct radeon_device *rdev, int ring); |
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring); |
474,7 → 476,6 |
/* protected by bo being reserved */ |
struct list_head bo_list; |
uint32_t flags; |
uint64_t addr; |
struct radeon_fence *last_pt_update; |
unsigned ref_count; |
/drivers/video/drm/radeon/radeon_atombios.c |
---|
2095,7 → 2095,7 |
struct radeon_i2c_bus_rec i2c_bus; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
2575,7 → 2575,7 |
bool valid; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
if (!atom_parse_data_header(mode_info->atom_context, index, NULL, |
2666,7 → 2666,7 |
bool valid; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
u8 *power_state_offset; |
/drivers/video/drm/radeon/radeon_bios.c |
---|
31,6 → 31,7 |
#include "atom.h" |
#include <linux/slab.h> |
#include <linux/acpi.h> |
/* |
* BIOS. |
*/ |
/drivers/video/drm/radeon/radeon_combios.c |
---|
34,7 → 34,6 |
#include <asm/machdep.h> |
#include <asm/pmac_feature.h> |
#include <asm/prom.h> |
#include <asm/pci-bridge.h> |
#endif /* CONFIG_PPC_PMAC */ |
/* from radeon_legacy_encoder.c */ |
/drivers/video/drm/radeon/radeon_connectors.c |
---|
34,6 → 34,7 |
#include "atom.h" |
#include <linux/pm_runtime.h> |
#include <linux/vga_switcheroo.h> |
static int radeon_dp_handle_hpd(struct drm_connector *connector) |
{ |
344,6 → 345,11 |
else if (radeon_connector->ddc_bus) |
radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
&radeon_connector->ddc_bus->adapter); |
} else if (vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC && |
connector->connector_type == DRM_MODE_CONNECTOR_LVDS && |
radeon_connector->ddc_bus) { |
radeon_connector->edid = drm_get_edid_switcheroo(&radeon_connector->base, |
&radeon_connector->ddc_bus->adapter); |
} else if (radeon_connector->ddc_bus) { |
radeon_connector->edid = drm_get_edid(&radeon_connector->base, |
&radeon_connector->ddc_bus->adapter); |
/drivers/video/drm/radeon/radeon_device.c |
---|
31,6 → 31,7 |
#include <drm/drm_crtc_helper.h> |
#include <drm/radeon_drm.h> |
#include <linux/vgaarb.h> |
#include <linux/vga_switcheroo.h> |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "atom.h" |
1203,9 → 1204,9 |
radeon_vm_size = 4; |
} |
/* |
* Max GPUVM size for Cayman, SI and CI are 40 bits. |
*/ |
/* |
* Max GPUVM size for Cayman, SI and CI are 40 bits. |
*/ |
if (radeon_vm_size > 1024) { |
dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n", |
radeon_vm_size); |
1278,9 → 1279,9 |
} |
rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS); |
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n", |
radeon_family_name[rdev->family], pdev->vendor, pdev->device, |
pdev->subsystem_vendor, pdev->subsystem_device); |
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", |
radeon_family_name[rdev->family], pdev->vendor, pdev->device, |
pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); |
/* mutex initialization are all done here so we |
* can recall function without having locking issues */ |
1348,13 → 1349,6 |
(rdev->family <= CHIP_RS740)) |
rdev->need_dma32 = true; |
dma_bits = rdev->need_dma32 ? 32 : 40; |
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); |
if (r) { |
rdev->need_dma32 = true; |
dma_bits = 32; |
printk(KERN_WARNING "radeon: No suitable DMA available.\n"); |
} |
/* Registers mapping */ |
/* TODO: block userspace mapping of io register */ |
/drivers/video/drm/radeon/radeon_display.c |
---|
351,6 → 351,7 |
/* drop the power reference we got coming in here */ |
// pm_runtime_put_autosuspend(dev->dev); |
return ret; |
} |
static const struct drm_crtc_funcs radeon_crtc_funcs = { |
584,7 → 585,7 |
*den /= tmp; |
/* make sure nominator is large enough */ |
if (*nom < nom_min) { |
if (*nom < nom_min) { |
tmp = DIV_ROUND_UP(nom_min, *nom); |
*nom *= tmp; |
*den *= tmp; |
624,7 → 625,7 |
*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den); |
/* limit fb divider to its maximum */ |
if (*fb_div > fb_div_max) { |
if (*fb_div > fb_div_max) { |
*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div); |
*fb_div = fb_div_max; |
} |
1326,6 → 1327,9 |
{ |
kfree(rdev->mode_info.bios_hardcoded_edid); |
/* free i2c buses */ |
radeon_i2c_fini(rdev); |
if (rdev->mode_info.mode_config_initialized) { |
// radeon_afmt_fini(rdev); |
// drm_kms_helper_poll_fini(rdev->ddev); |
1333,8 → 1337,6 |
// drm_mode_config_cleanup(rdev->ddev); |
rdev->mode_info.mode_config_initialized = false; |
} |
/* free i2c buses */ |
radeon_i2c_fini(rdev); |
} |
static bool is_hdtv_mode(const struct drm_display_mode *mode) |
/drivers/video/drm/radeon/radeon_dp_mst.c |
---|
89,9 → 89,17 |
WREG32(NI_DP_MSE_SAT_UPDATE + primary->offset, 1); |
do { |
unsigned value1, value2; |
udelay(10); |
temp = RREG32(NI_DP_MSE_SAT_UPDATE + primary->offset); |
} while ((temp & 0x1) && retries++ < 10000); |
value1 = temp & NI_DP_MSE_SAT_UPDATE_MASK; |
value2 = temp & NI_DP_MSE_16_MTP_KEEPOUT; |
if (!value1 && !value2) |
break; |
} while (retries++ < 50); |
if (retries == 10000) |
DRM_ERROR("timed out waitin for SAT update %d\n", primary->offset); |
150,7 → 158,7 |
return 0; |
} |
static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, uint32_t x, uint32_t y) |
static int radeon_dp_mst_set_vcp_size(struct radeon_encoder *mst, s64 avg_time_slots_per_mtp) |
{ |
struct drm_device *dev = mst->base.dev; |
struct radeon_device *rdev = dev->dev_private; |
158,6 → 166,8 |
uint32_t val, temp; |
uint32_t offset = radeon_atom_set_enc_offset(mst_enc->fe); |
int retries = 0; |
uint32_t x = drm_fixp2int(avg_time_slots_per_mtp); |
uint32_t y = drm_fixp2int_ceil((avg_time_slots_per_mtp - x) << 26); |
val = NI_DP_MSE_RATE_X(x) | NI_DP_MSE_RATE_Y(y); |
165,6 → 175,7 |
do { |
temp = RREG32(NI_DP_MSE_RATE_UPDATE + offset); |
udelay(10); |
} while ((temp & 0x1) && (retries++ < 10000)); |
if (retries >= 10000) |
246,14 → 257,8 |
kfree(radeon_connector); |
} |
static int radeon_connector_dpms(struct drm_connector *connector, int mode) |
{ |
DRM_DEBUG_KMS("\n"); |
return 0; |
} |
static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { |
.dpms = radeon_connector_dpms, |
.dpms = drm_helper_connector_dpms, |
.detect = radeon_dp_mst_detect, |
.fill_modes = drm_helper_probe_single_connector_modes, |
.destroy = radeon_dp_mst_connector_destroy, |
394,7 → 399,7 |
struct drm_crtc *crtc; |
struct radeon_crtc *radeon_crtc; |
int ret, slots; |
s64 fixed_pbn, fixed_pbn_per_slot, avg_time_slots_per_mtp; |
if (!ASIC_IS_DCE5(rdev)) { |
DRM_ERROR("got mst dpms on non-DCE5\n"); |
return; |
456,8 → 461,12 |
mst_enc->enc_active = true; |
radeon_dp_mst_update_stream_attribs(radeon_connector->mst_port, primary); |
radeon_dp_mst_set_vcp_size(radeon_encoder, slots, 0); |
fixed_pbn = drm_int2fixp(mst_enc->pbn); |
fixed_pbn_per_slot = drm_int2fixp(radeon_connector->mst_port->mst_mgr.pbn_div); |
avg_time_slots_per_mtp = drm_fixp_div(fixed_pbn, fixed_pbn_per_slot); |
radeon_dp_mst_set_vcp_size(radeon_encoder, avg_time_slots_per_mtp); |
atombios_dig_encoder_setup2(&primary->base, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0, |
mst_enc->fe); |
ret = drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr); |
510,6 → 519,7 |
{ |
struct radeon_encoder_mst *mst_enc; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_connector_atom_dig *dig_connector; |
int bpp = 24; |
mst_enc = radeon_encoder->enc_priv; |
523,14 → 533,11 |
drm_mode_set_crtcinfo(adjusted_mode, 0); |
{ |
struct radeon_connector_atom_dig *dig_connector; |
dig_connector = mst_enc->connector->con_priv; |
dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd); |
dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd); |
DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, |
dig_connector->dp_lane_count, dig_connector->dp_clock); |
} |
dig_connector = mst_enc->connector->con_priv; |
dig_connector->dp_lane_count = drm_dp_max_lane_count(dig_connector->dpcd); |
dig_connector->dp_clock = drm_dp_max_link_rate(dig_connector->dpcd); |
DRM_DEBUG_KMS("dig clock %p %d %d\n", dig_connector, |
dig_connector->dp_lane_count, dig_connector->dp_clock); |
return true; |
} |
/drivers/video/drm/radeon/radeon_fb.c |
---|
39,9 → 39,9 |
struct drm_gem_object *main_fb_obj; |
/* object hierarchy - |
this contains a helper + a radeon fb |
the helper contains a pointer to radeon framebuffer baseclass. |
*/ |
* this contains a helper + a radeon fb |
* the helper contains a pointer to radeon framebuffer baseclass. |
*/ |
struct radeon_fbdev { |
struct drm_fb_helper helper; |
struct radeon_framebuffer rfb; |
309,6 → 309,10 |
int bpp_sel = 32; |
int ret; |
/* don't enable fbdev if no connectors */ |
if (list_empty(&rdev->ddev->mode_config.connector_list)) |
return 0; |
/* select 8 bpp console on RN50 or 16MB cards */ |
if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) |
bpp_sel = 8; |
/drivers/video/drm/radeon/radeon_fence.c |
---|
408,7 → 408,7 |
* |
* Check if the requested fence has signaled (all asics). |
* Returns true if the fence has signaled or false if it has not. |
*/ |
*/ |
bool radeon_fence_signaled(struct radeon_fence *fence) |
{ |
if (!fence) |
475,40 → 475,40 |
return timeout; |
/* enable IRQs and tracing */ |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); |
radeon_irq_kms_sw_irq_get(rdev, i); |
} |
trace_radeon_fence_wait_begin(rdev->ddev, i, target_seq[i]); |
radeon_irq_kms_sw_irq_get(rdev, i); |
} |
if (intr) { |
r = wait_event_interruptible_timeout(rdev->fence_queue, ( |
if (intr) { |
r = wait_event_interruptible_timeout(rdev->fence_queue, ( |
radeon_fence_any_seq_signaled(rdev, target_seq) |
|| rdev->needs_reset), timeout); |
} else { |
r = wait_event_timeout(rdev->fence_queue, ( |
} else { |
r = wait_event_timeout(rdev->fence_queue, ( |
radeon_fence_any_seq_signaled(rdev, target_seq) |
|| rdev->needs_reset), timeout); |
} |
} |
if (rdev->needs_reset) |
r = -EDEADLK; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
if (!target_seq[i]) |
continue; |
radeon_irq_kms_sw_irq_put(rdev, i); |
trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); |
} |
radeon_irq_kms_sw_irq_put(rdev, i); |
trace_radeon_fence_wait_end(rdev->ddev, i, target_seq[i]); |
} |
return r; |
return r; |
} |
/** |
* radeon_fence_wait - wait for a fence to signal |
* radeon_fence_wait_timeout - wait for a fence to signal with timeout |
* |
* @fence: radeon fence object |
* @intr: use interruptible sleep |
516,12 → 516,15 |
* Wait for the requested fence to signal (all asics). |
* @intr selects whether to use interruptable (true) or non-interruptable |
* (false) sleep when waiting for the fence. |
* Returns 0 if the fence has passed, error for all other cases. |
* @timeout: maximum time to wait, or MAX_SCHEDULE_TIMEOUT for infinite wait |
* Returns remaining time if the sequence number has passed, 0 when |
* the wait timeout, or an error for all other cases. |
*/ |
int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeout) |
{ |
uint64_t seq[RADEON_NUM_RINGS] = {}; |
long r; |
int r_sig; |
/* |
* This function should not be called on !radeon fences. |
533,18 → 536,39 |
return fence_wait(&fence->base, intr); |
seq[fence->ring] = fence->seq; |
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, MAX_SCHEDULE_TIMEOUT); |
if (r < 0) { |
return r; |
r = radeon_fence_wait_seq_timeout(fence->rdev, seq, intr, timeout); |
if (r <= 0) { |
return r; |
} |
r = fence_signal(&fence->base); |
if (!r) |
r_sig = fence_signal(&fence->base); |
if (!r_sig) |
FENCE_TRACE(&fence->base, "signaled from fence_wait\n"); |
return 0; |
return r; |
} |
/** |
* radeon_fence_wait - wait for a fence to signal |
* |
* @fence: radeon fence object |
* @intr: use interruptible sleep |
* |
* Wait for the requested fence to signal (all asics). |
* @intr selects whether to use interruptable (true) or non-interruptable |
* (false) sleep when waiting for the fence. |
* Returns 0 if the fence has passed, error for all other cases. |
*/ |
int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
{ |
long r = radeon_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); |
if (r > 0) { |
return 0; |
} else { |
return r; |
} |
} |
/** |
* radeon_fence_wait_any - wait for a fence to signal on any ring |
* |
* @rdev: radeon device pointer |
/drivers/video/drm/radeon/radeon_gart.c |
---|
66,8 → 66,8 |
{ |
void *ptr; |
ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size, |
&rdev->gart.table_addr); |
ptr = pci_alloc_consistent(rdev->pdev, rdev->gart.table_size, |
&rdev->gart.table_addr); |
if (ptr == NULL) { |
return -ENOMEM; |
} |
/drivers/video/drm/radeon/radeon_gem.c |
---|
257,7 → 257,6 |
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, |
struct drm_file *filp) |
{ |
struct radeon_device *rdev = dev->dev_private; |
struct drm_radeon_gem_busy *args = data; |
struct drm_gem_object *gobj; |
struct radeon_bo *robj; |
272,7 → 271,6 |
r = radeon_bo_wait(robj, &cur_placement, true); |
args->domain = radeon_mem_type_to_domain(cur_placement); |
drm_gem_object_unreference_unlocked(gobj); |
r = radeon_gem_handle_lockup(rdev, r); |
return r; |
} |
/drivers/video/drm/radeon/radeon_ib.c |
---|
274,7 → 274,7 |
if (i == RADEON_RING_TYPE_GFX_INDEX) { |
/* oh, oh, that's really bad */ |
DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r); |
rdev->accel_working = false; |
rdev->accel_working = false; |
return r; |
} else { |
304,7 → 304,7 |
} |
static struct drm_info_list radeon_debugfs_sa_list[] = { |
{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL}, |
{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL}, |
}; |
#endif |
/drivers/video/drm/radeon/radeon_irq_kms.c |
---|
156,10 → 156,12 |
if (r) { |
return r; |
} |
rdev->ddev->vblank_disable_allowed = true; |
/* enable msi */ |
rdev->msi_enabled = 0; |
rdev->irq.installed = true; |
rdev->irq.installed = true; |
r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); |
if (r) { |
rdev->irq.installed = false; |
/drivers/video/drm/radeon/radeon_kfd.h |
---|
33,7 → 33,7 |
struct radeon_device; |
bool radeon_kfd_init(void); |
int radeon_kfd_init(void); |
void radeon_kfd_fini(void); |
void radeon_kfd_suspend(struct radeon_device *rdev); |
/drivers/video/drm/radeon/radeon_kms.c |
---|
30,6 → 30,7 |
#include <drm/radeon_drm.h> |
#include "radeon_asic.h" |
#include <linux/vga_switcheroo.h> |
#include <linux/slab.h> |
#include <linux/pm_runtime.h> |
/drivers/video/drm/radeon/radeon_legacy_crtc.c |
---|
331,13 → 331,15 |
RADEON_CRTC_DISP_REQ_EN_B)); |
WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); |
} |
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); |
if (dev->num_crtcs > radeon_crtc->crtc_id) |
drm_vblank_on(dev, radeon_crtc->crtc_id); |
radeon_crtc_load_lut(crtc); |
break; |
case DRM_MODE_DPMS_STANDBY: |
case DRM_MODE_DPMS_SUSPEND: |
case DRM_MODE_DPMS_OFF: |
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); |
if (dev->num_crtcs > radeon_crtc->crtc_id) |
drm_vblank_off(dev, radeon_crtc->crtc_id); |
if (radeon_crtc->crtc_id) |
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); |
else { |
/drivers/video/drm/radeon/radeon_legacy_encoders.c |
---|
818,52 → 818,52 |
tmds_transmitter_cntl = RREG32(RADEON_TMDS_TRANSMITTER_CNTL) & |
~(RADEON_TMDS_TRANSMITTER_PLLRST); |
if (rdev->family == CHIP_R200 || |
rdev->family == CHIP_R100 || |
ASIC_IS_R300(rdev)) |
tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN); |
else /* RV chips got this bit reversed */ |
tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN; |
if (rdev->family == CHIP_R200 || |
rdev->family == CHIP_R100 || |
ASIC_IS_R300(rdev)) |
tmds_transmitter_cntl &= ~(RADEON_TMDS_TRANSMITTER_PLLEN); |
else /* RV chips got this bit reversed */ |
tmds_transmitter_cntl |= RADEON_TMDS_TRANSMITTER_PLLEN; |
fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) | |
(RADEON_FP_CRTC_DONT_SHADOW_VPAR | |
RADEON_FP_CRTC_DONT_SHADOW_HEND)); |
fp_gen_cntl = (RREG32(RADEON_FP_GEN_CNTL) | |
(RADEON_FP_CRTC_DONT_SHADOW_VPAR | |
RADEON_FP_CRTC_DONT_SHADOW_HEND)); |
fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); |
fp_gen_cntl &= ~(RADEON_FP_FPON | RADEON_FP_TMDS_EN); |
fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN | |
RADEON_FP_DFP_SYNC_SEL | |
RADEON_FP_CRT_SYNC_SEL | |
RADEON_FP_CRTC_LOCK_8DOT | |
RADEON_FP_USE_SHADOW_EN | |
RADEON_FP_CRTC_USE_SHADOW_VEND | |
RADEON_FP_CRT_SYNC_ALT); |
fp_gen_cntl &= ~(RADEON_FP_RMX_HVSYNC_CONTROL_EN | |
RADEON_FP_DFP_SYNC_SEL | |
RADEON_FP_CRT_SYNC_SEL | |
RADEON_FP_CRTC_LOCK_8DOT | |
RADEON_FP_USE_SHADOW_EN | |
RADEON_FP_CRTC_USE_SHADOW_VEND | |
RADEON_FP_CRT_SYNC_ALT); |
if (1) /* FIXME rgbBits == 8 */ |
fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ |
else |
fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */ |
if (1) /* FIXME rgbBits == 8 */ |
fp_gen_cntl |= RADEON_FP_PANEL_FORMAT; /* 24 bit format */ |
else |
fp_gen_cntl &= ~RADEON_FP_PANEL_FORMAT;/* 18 bit format */ |
if (radeon_crtc->crtc_id == 0) { |
if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
if (radeon_encoder->rmx_type != RMX_OFF) |
fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; |
else |
fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; |
} else |
fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2; |
} else { |
if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2; |
} else |
fp_gen_cntl |= RADEON_FP_SEL_CRTC2; |
} |
if (radeon_crtc->crtc_id == 0) { |
if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
if (radeon_encoder->rmx_type != RMX_OFF) |
fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; |
else |
fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; |
} else |
fp_gen_cntl &= ~RADEON_FP_SEL_CRTC2; |
} else { |
if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC2; |
} else |
fp_gen_cntl |= RADEON_FP_SEL_CRTC2; |
} |
WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl); |
WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl); |
WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); |
WREG32(RADEON_TMDS_PLL_CNTL, tmds_pll_cntl); |
WREG32(RADEON_TMDS_TRANSMITTER_CNTL, tmds_transmitter_cntl); |
WREG32(RADEON_FP_GEN_CNTL, fp_gen_cntl); |
if (rdev->is_atom_bios) |
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
/drivers/video/drm/radeon/radeon_pm.c |
---|
76,7 → 76,7 |
radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); |
} |
mutex_unlock(&rdev->pm.mutex); |
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
} else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { |
if (rdev->pm.profile == PM_PROFILE_AUTO) { |
mutex_lock(&rdev->pm.mutex); |
radeon_pm_update_profile(rdev); |
275,7 → 275,7 |
if (rdev->pm.active_crtcs & (1 << i)) { |
/* This can fail if a modeset is in progress */ |
if (drm_vblank_get(rdev->ddev, i) == 0) |
rdev->pm.req_vblank |= (1 << i); |
rdev->pm.req_vblank |= (1 << i); |
else |
DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n", |
i); |
/drivers/video/drm/radeon/radeon_semaphore.c |
---|
56,7 → 56,7 |
} |
bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ridx, |
struct radeon_semaphore *semaphore) |
struct radeon_semaphore *semaphore) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
73,7 → 73,7 |
} |
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx, |
struct radeon_semaphore *semaphore) |
struct radeon_semaphore *semaphore) |
{ |
struct radeon_ring *ring = &rdev->ring[ridx]; |
/drivers/video/drm/radeon/radeon_uvd.c |
---|
722,9 → 722,11 |
return r; |
} |
/* multiple fence commands without any stream commands in between can |
crash the vcpu so just try to emmit a dummy create/destroy msg to |
avoid this */ |
/* |
* multiple fence commands without any stream commands in between can |
* crash the vcpu so just try to emmit a dummy create/destroy msg to |
* avoid this |
*/ |
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring, |
uint32_t handle, struct radeon_fence **fence) |
{ |
/drivers/video/drm/radeon/radeon_vce.c |
---|
166,7 → 166,7 |
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) { |
atomic_set(&rdev->vce.handles[i], 0); |
rdev->vce.filp[i] = NULL; |
} |
} |
return 0; |
} |
389,7 → 389,7 |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) { |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
} |
if (fence) |
446,7 → 446,7 |
r = radeon_ib_schedule(rdev, &ib, NULL, false); |
if (r) { |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); |
} |
if (fence) |
769,18 → 769,18 |
radeon_ring_unlock_commit(rdev, ring, false); |
for (i = 0; i < rdev->usec_timeout; i++) { |
if (vce_v1_0_get_rptr(rdev, ring) != rptr) |
break; |
DRM_UDELAY(1); |
if (vce_v1_0_get_rptr(rdev, ring) != rptr) |
break; |
DRM_UDELAY(1); |
} |
if (i < rdev->usec_timeout) { |
DRM_INFO("ring test on %d succeeded in %d usecs\n", |
ring->idx, i); |
DRM_INFO("ring test on %d succeeded in %d usecs\n", |
ring->idx, i); |
} else { |
DRM_ERROR("radeon: ring %d test failed\n", |
ring->idx); |
r = -ETIMEDOUT; |
DRM_ERROR("radeon: ring %d test failed\n", |
ring->idx); |
r = -ETIMEDOUT; |
} |
return r; |
/drivers/video/drm/radeon/radeon_vm.c |
---|
611,15 → 611,16 |
*/ |
static uint32_t radeon_vm_page_flags(uint32_t flags) |
{ |
uint32_t hw_flags = 0; |
hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0; |
hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; |
hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; |
if (flags & RADEON_VM_PAGE_SYSTEM) { |
hw_flags |= R600_PTE_SYSTEM; |
hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; |
} |
return hw_flags; |
uint32_t hw_flags = 0; |
hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0; |
hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0; |
hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0; |
if (flags & RADEON_VM_PAGE_SYSTEM) { |
hw_flags |= R600_PTE_SYSTEM; |
hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0; |
} |
return hw_flags; |
} |
/** |
/drivers/video/drm/radeon/rs780_dpm.c |
---|
795,7 → 795,7 |
union pplib_clock_info *clock_info; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
struct igp_ps *ps; |
/drivers/video/drm/radeon/rv6xx_dpm.c |
---|
209,7 → 209,7 |
static bool rv6xx_can_step_post_div(struct radeon_device *rdev, |
struct rv6xx_sclk_stepping *cur, |
struct rv6xx_sclk_stepping *target) |
struct rv6xx_sclk_stepping *target) |
{ |
return (cur->post_divider > target->post_divider) && |
((cur->vco_frequency * target->post_divider) <= |
239,7 → 239,7 |
static void rv6xx_generate_steps(struct radeon_device *rdev, |
u32 low, u32 high, |
u32 start_index, u8 *end_index) |
u32 start_index, u8 *end_index) |
{ |
struct rv6xx_sclk_stepping cur; |
struct rv6xx_sclk_stepping target; |
1356,23 → 1356,23 |
enum radeon_dpm_event_src dpm_event_src; |
switch (sources) { |
case 0: |
default: |
case 0: |
default: |
want_thermal_protection = false; |
break; |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): |
want_thermal_protection = true; |
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; |
break; |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): |
want_thermal_protection = true; |
dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL; |
break; |
case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | |
case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | |
(1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)): |
want_thermal_protection = true; |
want_thermal_protection = true; |
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; |
break; |
} |
1879,7 → 1879,7 |
union pplib_clock_info *clock_info; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
struct rv6xx_ps *ps; |
/drivers/video/drm/radeon/rv740_dpm.c |
---|
36,28 → 36,28 |
u32 ref = 0; |
switch (encoded_ref) { |
case 0: |
case 0: |
ref = 1; |
break; |
case 16: |
case 16: |
ref = 2; |
break; |
case 17: |
case 17: |
ref = 3; |
break; |
case 18: |
case 18: |
ref = 2; |
break; |
case 19: |
case 19: |
ref = 3; |
break; |
case 20: |
case 20: |
ref = 4; |
break; |
case 21: |
case 21: |
ref = 5; |
break; |
default: |
default: |
DRM_ERROR("Invalid encoded Reference Divider\n"); |
ref = 0; |
break; |
/drivers/video/drm/radeon/rv770_dpm.c |
---|
345,27 → 345,27 |
int ret = 0; |
switch (postdiv) { |
case 1: |
case 1: |
*encoded_postdiv = 0; |
break; |
case 2: |
case 2: |
*encoded_postdiv = 1; |
break; |
case 4: |
case 4: |
*encoded_postdiv = 2; |
break; |
case 8: |
case 8: |
*encoded_postdiv = 3; |
break; |
case 16: |
case 16: |
*encoded_postdiv = 4; |
break; |
default: |
default: |
ret = -EINVAL; |
break; |
} |
return ret; |
return ret; |
} |
u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf) |
1175,15 → 1175,15 |
rv770_populate_smc_mvdd_table(rdev, table); |
switch (rdev->pm.int_thermal_type) { |
case THERMAL_TYPE_RV770: |
case THERMAL_TYPE_ADT7473_WITH_INTERNAL: |
case THERMAL_TYPE_RV770: |
case THERMAL_TYPE_ADT7473_WITH_INTERNAL: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; |
break; |
case THERMAL_TYPE_NONE: |
case THERMAL_TYPE_NONE: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; |
break; |
case THERMAL_TYPE_EXTERNAL_GPIO: |
default: |
case THERMAL_TYPE_EXTERNAL_GPIO: |
default: |
table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; |
break; |
} |
1567,18 → 1567,18 |
sw_smio_index = |
(RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT; |
switch (sw_smio_index) { |
case 3: |
case 3: |
vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL); |
break; |
case 2: |
case 2: |
vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL); |
break; |
case 1: |
case 1: |
vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL); |
break; |
case 0: |
case 0: |
return; |
default: |
default: |
vid_smio_cntl = pi->s0_vid_lower_smio_cntl; |
break; |
} |
1817,21 → 1817,21 |
enum radeon_dpm_event_src dpm_event_src; |
switch (sources) { |
case 0: |
default: |
case 0: |
default: |
want_thermal_protection = false; |
break; |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): |
want_thermal_protection = true; |
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; |
break; |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL): |
want_thermal_protection = true; |
dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL; |
break; |
case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | |
case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | |
(1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)): |
want_thermal_protection = true; |
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; |
2273,7 → 2273,7 |
union pplib_clock_info *clock_info; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
struct rv7xx_ps *ps; |
/drivers/video/drm/radeon/si.c |
---|
1307,7 → 1307,7 |
*/ |
u32 si_get_xclk(struct radeon_device *rdev) |
{ |
u32 reference_clock = rdev->clock.spll.reference_freq; |
u32 reference_clock = rdev->clock.spll.reference_freq; |
u32 tmp; |
tmp = RREG32(CG_CLKPIN_CNTL_2); |
2442,8 → 2442,10 |
*/ |
static void si_tiling_mode_table_init(struct radeon_device *rdev) |
{ |
const u32 num_tile_mode_states = 32; |
u32 reg_offset, gb_tile_moden, split_equal_to_row_size; |
u32 *tile = rdev->config.si.tile_mode_array; |
const u32 num_tile_mode_states = |
ARRAY_SIZE(rdev->config.si.tile_mode_array); |
u32 reg_offset, split_equal_to_row_size; |
switch (rdev->config.si.mem_row_size_in_kb) { |
case 1: |
2458,491 → 2460,442 |
break; |
} |
if ((rdev->family == CHIP_TAHITI) || |
(rdev->family == CHIP_PITCAIRN)) { |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: /* non-AA compressed depth or any compressed stencil */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 1: /* 2xAA/4xAA compressed depth only */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 2: /* 8xAA compressed depth only */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
break; |
case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 8: /* 1D and 1D Array Surfaces */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 9: /* Displayable maps. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 10: /* Display 8bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 11: /* Display 16bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 12: /* Display 32bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
break; |
case 13: /* Thin. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 14: /* Thin 8 bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
break; |
case 15: /* Thin 16 bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
break; |
case 16: /* Thin 32 bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
break; |
case 17: /* Thin 64 bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
break; |
case 21: /* 8 bpp PRT. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 22: /* 16 bpp PRT */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
break; |
case 23: /* 32 bpp PRT */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 24: /* 64 bpp PRT */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 25: /* 128 bpp PRT */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | |
NUM_BANKS(ADDR_SURF_8_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
} else if ((rdev->family == CHIP_VERDE) || |
(rdev->family == CHIP_OLAND) || |
(rdev->family == CHIP_HAINAN)) { |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { |
switch (reg_offset) { |
case 0: /* non-AA compressed depth or any compressed stencil */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
break; |
case 1: /* 2xAA/4xAA compressed depth only */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
break; |
case 2: /* 8xAA compressed depth only */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
break; |
case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
break; |
case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
break; |
case 8: /* 1D and 1D Array Surfaces */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 9: /* Displayable maps. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 10: /* Display 8bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
break; |
case 11: /* Display 16bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 12: /* Display 32bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 13: /* Thin. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 14: /* Thin 8 bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 15: /* Thin 16 bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 16: /* Thin 32 bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 17: /* Thin 64 bpp. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 21: /* 8 bpp PRT. */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 22: /* 16 bpp PRT */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
break; |
case 23: /* 32 bpp PRT */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 24: /* 64 bpp PRT */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
break; |
case 25: /* 128 bpp PRT */ |
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | |
NUM_BANKS(ADDR_SURF_8_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
break; |
default: |
gb_tile_moden = 0; |
break; |
} |
rdev->config.si.tile_mode_array[reg_offset] = gb_tile_moden; |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); |
} |
} else |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
tile[reg_offset] = 0; |
switch(rdev->family) { |
case CHIP_TAHITI: |
case CHIP_PITCAIRN: |
/* non-AA compressed depth or any compressed stencil */ |
tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 2xAA/4xAA compressed depth only */ |
tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 8xAA compressed depth only */ |
tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */ |
tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ |
tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Uncompressed 16bpp depth - and stencil buffer allocated with it */ |
tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Uncompressed 32bpp depth - and stencil buffer allocated with it */ |
tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */ |
tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 1D and 1D Array Surfaces */ |
tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Displayable maps. */ |
tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Display 8bpp. */ |
tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Display 16bpp. */ |
tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Display 32bpp. */ |
tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
/* Thin. */ |
tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Thin 8 bpp. */ |
tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
/* Thin 16 bpp. */ |
tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
/* Thin 32 bpp. */ |
tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
/* Thin 64 bpp. */ |
tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
/* 8 bpp PRT. */ |
tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 16 bpp PRT */ |
tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
/* 32 bpp PRT */ |
tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 64 bpp PRT */ |
tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 128 bpp PRT */ |
tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | |
NUM_BANKS(ADDR_SURF_8_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]); |
break; |
case CHIP_VERDE: |
case CHIP_OLAND: |
case CHIP_HAINAN: |
/* non-AA compressed depth or any compressed stencil */ |
tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
/* 2xAA/4xAA compressed depth only */ |
tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
/* 8xAA compressed depth only */ |
tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
/* 2xAA/4xAA compressed depth with stencil (for depth buffer) */ |
tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
/* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */ |
tile[4] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Uncompressed 16bpp depth - and stencil buffer allocated with it */ |
tile[5] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Uncompressed 32bpp depth - and stencil buffer allocated with it */ |
tile[6] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Uncompressed 8bpp stencil without depth (drivers typically do not use) */ |
tile[7] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
/* 1D and 1D Array Surfaces */ |
tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Displayable maps. */ |
tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Display 8bpp. */ |
tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
/* Display 16bpp. */ |
tile[11] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Display 32bpp. */ |
tile[12] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Thin. */ |
tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Thin 8 bpp. */ |
tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Thin 16 bpp. */ |
tile[15] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Thin 32 bpp. */ |
tile[16] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* Thin 64 bpp. */ |
tile[17] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P4_8x16) | |
TILE_SPLIT(split_equal_to_row_size) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 8 bpp PRT. */ |
tile[21] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 16 bpp PRT */ |
tile[22] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4)); |
/* 32 bpp PRT */ |
tile[23] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 64 bpp PRT */ |
tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) | |
NUM_BANKS(ADDR_SURF_16_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2)); |
/* 128 bpp PRT */ |
tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) | |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) | |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) | |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) | |
NUM_BANKS(ADDR_SURF_8_BANK) | |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) | |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) | |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1)); |
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) |
WREG32(GB_TILE_MODE0 + (reg_offset * 4), tile[reg_offset]); |
break; |
default: |
DRM_ERROR("unknown asic: 0x%x\n", rdev->family); |
} |
} |
static void si_select_se_sh(struct radeon_device *rdev, |
7288,7 → 7241,7 |
mutex_lock(&rdev->gpu_clock_mutex); |
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); |
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); |
mutex_unlock(&rdev->gpu_clock_mutex); |
return clock; |
} |
7745,33 → 7698,33 |
int si_vce_send_vcepll_ctlreq(struct radeon_device *rdev) |
{ |
unsigned i; |
unsigned i; |
/* make sure VCEPLL_CTLREQ is deasserted */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); |
/* make sure VCEPLL_CTLREQ is deasserted */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); |
mdelay(10); |
mdelay(10); |
/* assert UPLL_CTLREQ */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); |
/* assert UPLL_CTLREQ */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK); |
/* wait for CTLACK and CTLACK2 to get asserted */ |
for (i = 0; i < 100; ++i) { |
uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; |
if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask) |
break; |
mdelay(10); |
} |
/* wait for CTLACK and CTLACK2 to get asserted */ |
for (i = 0; i < 100; ++i) { |
uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK; |
if ((RREG32_SMC(CG_VCEPLL_FUNC_CNTL) & mask) == mask) |
break; |
mdelay(10); |
} |
/* deassert UPLL_CTLREQ */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); |
/* deassert UPLL_CTLREQ */ |
WREG32_SMC_P(CG_VCEPLL_FUNC_CNTL, 0, ~UPLL_CTLREQ_MASK); |
if (i == 100) { |
DRM_ERROR("Timeout setting UVD clocks!\n"); |
return -ETIMEDOUT; |
} |
if (i == 100) { |
DRM_ERROR("Timeout setting UVD clocks!\n"); |
return -ETIMEDOUT; |
} |
return 0; |
return 0; |
} |
int si_set_vce_clocks(struct radeon_device *rdev, u32 evclk, u32 ecclk) |
/drivers/video/drm/radeon/si_dpm.c |
---|
499,7 → 499,7 |
static const struct si_cac_config_reg cac_override_pitcairn[] = |
{ |
{ 0xFFFFFFFF } |
{ 0xFFFFFFFF } |
}; |
static const struct si_powertune_data powertune_data_pitcairn = |
991,7 → 991,7 |
static const struct si_cac_config_reg cac_override_cape_verde[] = |
{ |
{ 0xFFFFFFFF } |
{ 0xFFFFFFFF } |
}; |
static const struct si_powertune_data powertune_data_cape_verde = |
1762,9 → 1762,9 |
static struct si_power_info *si_get_pi(struct radeon_device *rdev) |
{ |
struct si_power_info *pi = rdev->pm.dpm.priv; |
struct si_power_info *pi = rdev->pm.dpm.priv; |
return pi; |
return pi; |
} |
static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff, |
3157,9 → 3157,9 |
} |
} |
for (i = 0; i < ps->performance_level_count; i++) |
btc_adjust_clock_combinations(rdev, max_limits, |
&ps->performance_levels[i]); |
for (i = 0; i < ps->performance_level_count; i++) |
btc_adjust_clock_combinations(rdev, max_limits, |
&ps->performance_levels[i]); |
for (i = 0; i < ps->performance_level_count; i++) { |
if (ps->performance_levels[i].vddc < min_vce_voltage) |
3298,7 → 3298,7 |
case 0: |
default: |
want_thermal_protection = false; |
break; |
break; |
case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL): |
want_thermal_protection = true; |
dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL; |
3500,7 → 3500,7 |
if (ret) |
return ret; |
si_pi->state_table_start = tmp; |
si_pi->state_table_start = tmp; |
ret = si_read_smc_sram_dword(rdev, |
SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + |
3659,7 → 3659,7 |
si_write_smc_soft_register(rdev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); |
voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time; |
backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time; |
backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time; |
if (voltage_response_time == 0) |
voltage_response_time = 1000; |
3767,7 → 3767,7 |
&pi->pbsu); |
pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); |
pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); |
pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); |
WREG32(CG_BSP, pi->dsp); |
4315,7 → 4315,7 |
radeon_atom_set_engine_dram_timings(rdev, |
pl->sclk, |
pl->mclk); |
pl->mclk); |
dram_timing = RREG32(MC_ARB_DRAM_TIMING); |
dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); |
4350,7 → 4350,7 |
si_pi->sram_end); |
if (ret) |
break; |
} |
} |
return ret; |
} |
4828,9 → 4828,9 |
spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; |
spll_func_cntl_2 |= SCLK_MUX_SEL(2); |
spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; |
spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); |
spll_func_cntl_3 |= SPLL_DITHEN; |
spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; |
spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); |
spll_func_cntl_3 |= SPLL_DITHEN; |
if (pi->sclk_ss) { |
struct radeon_atom_ss ss; |
4937,15 → 4937,15 |
tmp = freq_nom / reference_clock; |
tmp = tmp * tmp; |
if (radeon_atombios_get_asic_ss_info(rdev, &ss, |
ASIC_INTERNAL_MEMORY_SS, freq_nom)) { |
ASIC_INTERNAL_MEMORY_SS, freq_nom)) { |
u32 clks = reference_clock * 5 / ss.rate; |
u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); |
mpll_ss1 &= ~CLKV_MASK; |
mpll_ss1 |= CLKV(clkv); |
mpll_ss1 &= ~CLKV_MASK; |
mpll_ss1 |= CLKV(clkv); |
mpll_ss2 &= ~CLKS_MASK; |
mpll_ss2 |= CLKS(clks); |
mpll_ss2 &= ~CLKS_MASK; |
mpll_ss2 |= CLKS(clks); |
} |
} |
5272,7 → 5272,7 |
ni_pi->enable_power_containment = false; |
ret = si_populate_sq_ramping_values(rdev, radeon_state, smc_state); |
if (ret) |
if (ret) |
ni_pi->enable_sq_ramping = false; |
return si_populate_smc_t(rdev, radeon_state, smc_state); |
5443,46 → 5443,46 |
case MC_SEQ_RAS_TIMING >> 2: |
*out_reg = MC_SEQ_RAS_TIMING_LP >> 2; |
break; |
case MC_SEQ_CAS_TIMING >> 2: |
case MC_SEQ_CAS_TIMING >> 2: |
*out_reg = MC_SEQ_CAS_TIMING_LP >> 2; |
break; |
case MC_SEQ_MISC_TIMING >> 2: |
case MC_SEQ_MISC_TIMING >> 2: |
*out_reg = MC_SEQ_MISC_TIMING_LP >> 2; |
break; |
case MC_SEQ_MISC_TIMING2 >> 2: |
case MC_SEQ_MISC_TIMING2 >> 2: |
*out_reg = MC_SEQ_MISC_TIMING2_LP >> 2; |
break; |
case MC_SEQ_RD_CTL_D0 >> 2: |
case MC_SEQ_RD_CTL_D0 >> 2: |
*out_reg = MC_SEQ_RD_CTL_D0_LP >> 2; |
break; |
case MC_SEQ_RD_CTL_D1 >> 2: |
case MC_SEQ_RD_CTL_D1 >> 2: |
*out_reg = MC_SEQ_RD_CTL_D1_LP >> 2; |
break; |
case MC_SEQ_WR_CTL_D0 >> 2: |
case MC_SEQ_WR_CTL_D0 >> 2: |
*out_reg = MC_SEQ_WR_CTL_D0_LP >> 2; |
break; |
case MC_SEQ_WR_CTL_D1 >> 2: |
case MC_SEQ_WR_CTL_D1 >> 2: |
*out_reg = MC_SEQ_WR_CTL_D1_LP >> 2; |
break; |
case MC_PMG_CMD_EMRS >> 2: |
case MC_PMG_CMD_EMRS >> 2: |
*out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2; |
break; |
case MC_PMG_CMD_MRS >> 2: |
case MC_PMG_CMD_MRS >> 2: |
*out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2; |
break; |
case MC_PMG_CMD_MRS1 >> 2: |
case MC_PMG_CMD_MRS1 >> 2: |
*out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2; |
break; |
case MC_SEQ_PMG_TIMING >> 2: |
case MC_SEQ_PMG_TIMING >> 2: |
*out_reg = MC_SEQ_PMG_TIMING_LP >> 2; |
break; |
case MC_PMG_CMD_MRS2 >> 2: |
case MC_PMG_CMD_MRS2 >> 2: |
*out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2; |
break; |
case MC_SEQ_WR_CTL_2 >> 2: |
case MC_SEQ_WR_CTL_2 >> 2: |
*out_reg = MC_SEQ_WR_CTL_2_LP >> 2; |
break; |
default: |
default: |
result = false; |
break; |
} |
5569,19 → 5569,19 |
WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); |
WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); |
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); |
if (ret) |
goto init_mc_done; |
ret = radeon_atom_init_mc_reg_table(rdev, module_index, table); |
if (ret) |
goto init_mc_done; |
ret = si_copy_vbios_mc_reg_table(table, si_table); |
if (ret) |
goto init_mc_done; |
ret = si_copy_vbios_mc_reg_table(table, si_table); |
if (ret) |
goto init_mc_done; |
si_set_s0_mc_reg_index(si_table); |
ret = si_set_mc_special_registers(rdev, si_table); |
if (ret) |
goto init_mc_done; |
if (ret) |
goto init_mc_done; |
si_set_valid_flag(si_table); |
5722,10 → 5722,10 |
static void si_enable_voltage_control(struct radeon_device *rdev, bool enable) |
{ |
if (enable) |
WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); |
else |
WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); |
if (enable) |
WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); |
else |
WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); |
} |
static enum radeon_pcie_gen si_get_maximum_link_speed(struct radeon_device *rdev, |
6827,7 → 6827,7 |
struct _NonClockInfoArray *non_clock_info_array; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
u8 *power_state_offset; |
struct ni_ps *ps; |
/drivers/video/drm/radeon/sumo_dpm.c |
---|
787,8 → 787,8 |
struct atom_clock_dividers dividers; |
int ret; |
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
pi->acpi_pl.sclk, |
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
pi->acpi_pl.sclk, |
false, ÷rs); |
if (ret) |
return; |
1462,7 → 1462,7 |
struct _NonClockInfoArray *non_clock_info_array; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
u8 *power_state_offset; |
struct sumo_ps *ps; |
/drivers/video/drm/radeon/trinity_dpm.c |
---|
369,8 → 369,8 |
int ret; |
u32 hw_rev = (RREG32(HW_REV) & ATI_REV_ID_MASK) >> ATI_REV_ID_SHIFT; |
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
25000, false, ÷rs); |
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
25000, false, ÷rs); |
if (ret) |
return; |
587,8 → 587,8 |
u32 value; |
u32 ix = index * TRINITY_SIZEOF_DPM_STATE_TABLE; |
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
sclk, false, ÷rs); |
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
sclk, false, ÷rs); |
if (ret) |
return; |
597,8 → 597,8 |
value |= CLK_DIVIDER(dividers.post_div); |
WREG32_SMC(SMU_SCLK_DPM_STATE_0_CNTL_0 + ix, value); |
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
sclk/2, false, ÷rs); |
ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM, |
sclk/2, false, ÷rs); |
if (ret) |
return; |
1045,14 → 1045,14 |
int low_temp = 0 * 1000; |
int high_temp = 255 * 1000; |
if (low_temp < min_temp) |
if (low_temp < min_temp) |
low_temp = min_temp; |
if (high_temp > max_temp) |
if (high_temp > max_temp) |
high_temp = max_temp; |
if (high_temp < low_temp) { |
if (high_temp < low_temp) { |
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); |
return -EINVAL; |
} |
return -EINVAL; |
} |
WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTH(49 + (high_temp / 1000)), ~DIG_THERM_INTH_MASK); |
WREG32_P(CG_THERMAL_INT_CTRL, DIG_THERM_INTL(49 + (low_temp / 1000)), ~DIG_THERM_INTL_MASK); |
1737,7 → 1737,7 |
struct _NonClockInfoArray *non_clock_info_array; |
union power_info *power_info; |
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); |
u16 data_offset; |
u16 data_offset; |
u8 frev, crev; |
u8 *power_state_offset; |
struct sumo_ps *ps; |
/drivers/video/drm/radeon/vce_v2_0.c |
---|
53,7 → 53,7 |
WREG32(VCE_UENC_REG_CLOCK_GATING, tmp); |
WREG32(VCE_CGTT_CLK_OVERRIDE, 0); |
} else { |
} else { |
tmp = RREG32(VCE_CLOCK_GATING_B); |
tmp |= 0xe7; |
tmp &= ~0xe70000; |
/drivers/video/drm/ttm/ttm_bo.c |
---|
720,9 → 720,9 |
if (mem_type == TTM_PL_SYSTEM) |
break; |
ret = (*man->func->get_node)(man, bo, place, mem); |
if (unlikely(ret)) |
return ret; |
ret = (*man->func->get_node)(man, bo, place, mem); |
if (unlikely(ret)) |
return ret; |
if (mem->mm_node) |
break; |
821,8 → 821,8 |
} |
bool ttm_bo_mem_compat(struct ttm_placement *placement, |
struct ttm_mem_reg *mem, |
uint32_t *new_flags) |
struct ttm_mem_reg *mem, |
uint32_t *new_flags) |
{ |
int i; |
854,6 → 854,7 |
return false; |
} |
EXPORT_SYMBOL(ttm_bo_mem_compat); |
int ttm_bo_validate(struct ttm_buffer_object *bo, |
struct ttm_placement *placement, |
893,17 → 894,17 |
EXPORT_SYMBOL(ttm_bo_validate); |
int ttm_bo_init(struct ttm_bo_device *bdev, |
struct ttm_buffer_object *bo, |
unsigned long size, |
enum ttm_bo_type type, |
struct ttm_placement *placement, |
uint32_t page_alignment, |
bool interruptible, |
struct file *persistent_swap_storage, |
size_t acc_size, |
struct sg_table *sg, |
struct ttm_buffer_object *bo, |
unsigned long size, |
enum ttm_bo_type type, |
struct ttm_placement *placement, |
uint32_t page_alignment, |
bool interruptible, |
struct file *persistent_swap_storage, |
size_t acc_size, |
struct sg_table *sg, |
struct reservation_object *resv, |
void (*destroy) (struct ttm_buffer_object *)) |
void (*destroy) (struct ttm_buffer_object *)) |
{ |
int ret = 0; |
unsigned long num_pages; |
920,30 → 921,30 |
} |
bo->destroy = destroy; |
kref_init(&bo->kref); |
kref_init(&bo->list_kref); |
kref_init(&bo->kref); |
kref_init(&bo->list_kref); |
atomic_set(&bo->cpu_writers, 0); |
INIT_LIST_HEAD(&bo->lru); |
INIT_LIST_HEAD(&bo->ddestroy); |
INIT_LIST_HEAD(&bo->swap); |
INIT_LIST_HEAD(&bo->io_reserve_lru); |
mutex_init(&bo->wu_mutex); |
bo->bdev = bdev; |
bo->glob = bdev->glob; |
bo->type = type; |
bo->num_pages = num_pages; |
bo->mem.size = num_pages << PAGE_SHIFT; |
bo->mem.mem_type = TTM_PL_SYSTEM; |
bo->mem.num_pages = bo->num_pages; |
bo->mem.mm_node = NULL; |
bo->mem.page_alignment = page_alignment; |
bo->mem.bus.io_reserved_vm = false; |
bo->mem.bus.io_reserved_count = 0; |
bo->priv_flags = 0; |
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); |
bo->persistent_swap_storage = persistent_swap_storage; |
bo->acc_size = acc_size; |
bo->sg = sg; |
INIT_LIST_HEAD(&bo->lru); |
INIT_LIST_HEAD(&bo->ddestroy); |
INIT_LIST_HEAD(&bo->swap); |
INIT_LIST_HEAD(&bo->io_reserve_lru); |
mutex_init(&bo->wu_mutex); |
bo->bdev = bdev; |
bo->glob = bdev->glob; |
bo->type = type; |
bo->num_pages = num_pages; |
bo->mem.size = num_pages << PAGE_SHIFT; |
bo->mem.mem_type = TTM_PL_SYSTEM; |
bo->mem.num_pages = bo->num_pages; |
bo->mem.mm_node = NULL; |
bo->mem.page_alignment = page_alignment; |
bo->mem.bus.io_reserved_vm = false; |
bo->mem.bus.io_reserved_count = 0; |
bo->priv_flags = 0; |
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); |
bo->persistent_swap_storage = persistent_swap_storage; |
bo->acc_size = acc_size; |
bo->sg = sg; |
if (resv) { |
bo->resv = resv; |
lockdep_assert_held(&bo->resv->lock.base); |
952,12 → 953,12 |
reservation_object_init(&bo->ttm_resv); |
} |
atomic_inc(&bo->glob->bo_count); |
drm_vma_node_reset(&bo->vma_node); |
drm_vma_node_reset(&bo->vma_node); |
/* |
* For ttm_bo_type_device buffers, allocate |
* address space from the device. |
*/ |
/* |
* For ttm_bo_type_device buffers, allocate |
* address space from the device. |
*/ |
if (bo->type == ttm_bo_type_device || |
bo->type == ttm_bo_type_sg) |
ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node, |
974,13 → 975,19 |
if (likely(!ret)) |
ret = ttm_bo_validate(bo, placement, interruptible, false); |
if (!resv) |
if (!resv) { |
ttm_bo_unreserve(bo); |
} else if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { |
spin_lock(&bo->glob->lru_lock); |
ttm_bo_add_to_lru(bo); |
spin_unlock(&bo->glob->lru_lock); |
} |
if (unlikely(ret)) |
ttm_bo_unref(&bo); |
return ret; |
return ret; |
} |
EXPORT_SYMBOL(ttm_bo_init); |
1111,23 → 1118,23 |
int ttm_bo_global_init(struct drm_global_reference *ref) |
{ |
struct ttm_bo_global_ref *bo_ref = |
container_of(ref, struct ttm_bo_global_ref, ref); |
struct ttm_bo_global *glob = ref->object; |
int ret; |
struct ttm_bo_global_ref *bo_ref = |
container_of(ref, struct ttm_bo_global_ref, ref); |
struct ttm_bo_global *glob = ref->object; |
int ret; |
mutex_init(&glob->device_list_mutex); |
spin_lock_init(&glob->lru_lock); |
glob->mem_glob = bo_ref->mem_glob; |
mutex_init(&glob->device_list_mutex); |
spin_lock_init(&glob->lru_lock); |
glob->mem_glob = bo_ref->mem_glob; |
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); |
if (unlikely(glob->dummy_read_page == NULL)) { |
ret = -ENOMEM; |
goto out_no_drp; |
} |
if (unlikely(glob->dummy_read_page == NULL)) { |
ret = -ENOMEM; |
goto out_no_drp; |
} |
INIT_LIST_HEAD(&glob->swap_lru); |
INIT_LIST_HEAD(&glob->device_list); |
INIT_LIST_HEAD(&glob->swap_lru); |
INIT_LIST_HEAD(&glob->device_list); |
atomic_set(&glob->bo_count, 0); |
1134,8 → 1141,8 |
return 0; |
out_no_drp: |
kfree(glob); |
return ret; |
kfree(glob); |
return ret; |
} |
EXPORT_SYMBOL(ttm_bo_global_init); |
1254,13 → 1261,13 |
timeout = fence_wait_timeout(fence, |
interruptible, timeout); |
} |
} |
} |
if (timeout < 0) |
return timeout; |
if (timeout == 0) |
return -EBUSY; |
return -EBUSY; |
reservation_object_add_excl_fence(resv, NULL); |
clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); |
/drivers/video/drm/ttm/ttm_bo_util.c |
---|
40,8 → 40,6 |
#define __pgprot(x) ((pgprot_t) { (x) } ) |
void *vmap(struct page **pages, unsigned int count, |
unsigned long flags, pgprot_t prot); |
void ttm_bo_free_old_node(struct ttm_buffer_object *bo) |
{ |
575,6 → 573,7 |
iounmap(map->virtual); |
break; |
case ttm_bo_map_vmap: |
vunmap(map->virtual); |
break; |
case ttm_bo_map_kmap: |
kunmap(map->page); |
677,3 → 676,8 |
return vaddr; |
}; |
/* KolibriOS shim for the kernel's vunmap(): releases a kernel virtual
 * mapping (paired with the local vmap() replacement — used by
 * ttm_bo_kunmap for ttm_bo_map_vmap mappings) by handing the address
 * back to FreeKernelSpace().  The cast drops the const qualifier,
 * which FreeKernelSpace() does not accept. */
void vunmap(const void *addr)
{
FreeKernelSpace((void*)addr);
}