Subversion Repositories Kolibri OS

Compare Revisions

Rev 2996 → Rev 2997

/drivers/video/drm/radeon/Makefile
42,23 → 42,24
 
NAME_SRC= \
pci.c \
$(DRM_TOPDIR)/drm_crtc.c \
$(DRM_TOPDIR)/drm_crtc_helper.c \
$(DRM_TOPDIR)/drm_dp_i2c_helper.c \
$(DRM_TOPDIR)/drm_edid.c \
$(DRM_TOPDIR)/drm_fb_helper.c \
$(DRM_TOPDIR)/drm_irq.c \
$(DRM_TOPDIR)/drm_mm.c \
$(DRM_TOPDIR)/drm_modes.c \
$(DRM_TOPDIR)/drm_pci.c \
$(DRM_TOPDIR)/drm_stub.c \
$(DRM_TOPDIR)/i2c/i2c-core.c \
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \
tracker/bitmap.c \
r700_vs.c \
r600_video.c \
radeon_device.c \
evergreen.c \
evergreen_blit_shaders.c \
evergreen_blit_kms.c \
evergreen_hdmi.c \
cayman_blit_shaders.c \
radeon_clocks.c \
atom.c \
72,6 → 73,8
radeon_connectors.c \
atombios_crtc.c \
atombios_dp.c \
atombios_encoders.c \
atombios_i2c.c \
radeon_encoders.c \
radeon_fence.c \
radeon_gem.c \
84,6 → 87,8
radeon_gart.c \
radeon_ring.c \
radeon_object_kos.c \
radeon_sa.c \
radeon_semaphore.c \
radeon_pm.c \
r100.c \
r200.c \
92,7 → 97,6
rv515.c \
r520.c \
r600.c \
r600_audio.c \
r600_blit_kms.c \
r600_blit_shaders.c \
r600_hdmi.c \
104,6 → 108,8
rdisplay.c \
rdisplay_kms.c \
cmdline.c \
si.c \
si_blit_shaders.c \
fwblob.asm
 
FW_BINS= \
/drivers/video/drm/radeon/ObjectID.h
85,6 → 85,7
#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA 0x1F
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 0x20
#define ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 0x21
#define ENCODER_OBJECT_ID_INTERNAL_VCE 0x24
 
#define ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO 0xFF
 
387,6 → 388,10
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_NUTMEG << OBJECT_ID_SHIFT)
 
#define ENCODER_VCE_ENUM_ID1 ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
ENCODER_OBJECT_ID_INTERNAL_VCE << OBJECT_ID_SHIFT)
 
/****************************************************/
/* Connector Object ID definition - Shared with BIOS */
/****************************************************/
/drivers/video/drm/radeon/atom.c
277,7 → 277,12
    case ATOM_ARG_FB:
        idx = U8(*ptr);
        (*ptr)++;
        val = gctx->scratch[((gctx->fb_base + idx) / 4)];
        if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
            DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n",
                      gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
            val = 0;
        } else
            val = gctx->scratch[(gctx->fb_base / 4) + idx];
        if (print)
            DEBUG("FB[0x%02X]", idx);
        break;
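This hunk (and its twin in the write path below) fixes two things at once. First, fb_base is a byte offset into scratch, an array of 32-bit words, while idx counts dwords, so the old expression ((fb_base + idx) / 4) mixed units and effectively computed fb_base/4 + idx/4. Second, the access is now bounds-checked against scratch_size_bytes before touching memory. A worked example with illustrative values (not taken from the driver):

/* fb_base = 0x100 (byte offset of the FB window), idx = 2 (dword index):
 *   old: scratch[(0x100 + 2) / 4] == scratch[64]  -- dword 0 of the window
 *   new: scratch[(0x100 / 4) + 2] == scratch[66]  -- dword 2, as intended
 */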
531,7 → 536,11
    case ATOM_ARG_FB:
        idx = U8(*ptr);
        (*ptr)++;
        gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
        if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) {
            DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n",
                      gctx->fb_base + (idx * 4), gctx->scratch_size_bytes);
        } else
            gctx->scratch[(gctx->fb_base / 4) + idx] = val;
        DEBUG("FB[0x%02X]", idx);
        break;
    case ATOM_ARG_PLL:
714,9 → 723,26
    if (arg != ATOM_COND_ALWAYS)
        SDEBUG(" taken: %s\n", execute ? "yes" : "no");
    SDEBUG(" target: 0x%04X\n", target);
    if (execute)
    if (execute) {
        if (ctx->last_jump == (ctx->start + target)) {
            cjiffies = GetTimerTicks();
            if (time_after(cjiffies, ctx->last_jump_jiffies)) {
                cjiffies -= ctx->last_jump_jiffies;
                if ((jiffies_to_msecs(cjiffies) > 5000)) {
                    DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
                    ctx->abort = true;
                }
            } else {
                /* jiffies wrapped around; just wait a little longer */
                ctx->last_jump_jiffies = GetTimerTicks();
            }
        } else {
            ctx->last_jump = ctx->start + target;
            ctx->last_jump_jiffies = GetTimerTicks();
        }
        *ptr = ctx->start + target;
    }
}
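The new code makes atom_op_jump detect AtomBIOS scripts that spin on the same jump target: the interpreter records the last target and the tick count when it first branched there, and sets ctx->abort once the same target has been retaken for more than 5000 ms (GetTimerTicks() standing in for the kernel's jiffies in this port). The core pattern, reduced to a self-contained sketch with illustrative names, not driver code:

#include <stdbool.h>
#include <stdint.h>

/* Re-arm on a new jump target; report "stuck" once the same target has been
 * retaken for more than 5000 ms (ticks assumed to be 1 ms each here).
 * Unsigned subtraction keeps the comparison valid across counter wrap. */
static bool atom_loop_watchdog(uint32_t *last_target, uint32_t *first_seen_ms,
                               uint32_t target, uint32_t now_ms)
{
    if (*last_target != target) {
        *last_target = target;     /* new target: remember it... */
        *first_seen_ms = now_ms;   /* ...and when we first took it */
        return false;
    }
    return now_ms - *first_seen_ms > 5000;
}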
 
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
{
1278,8 → 1304,11
 
int atom_asic_init(struct atom_context *ctx)
{
    struct radeon_device *rdev = ctx->card->dev->dev_private;
    int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR);
    uint32_t ps[16];
    int ret;

    memset(ps, 0, 64);

    ps[0] = cpu_to_le32(CU32(hwi + ATOM_FWI_DEFSCLK_PTR));
1289,8 → 1318,18
 
    if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
        return 1;
    return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
    ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps);
    if (ret)
        return ret;

    memset(ps, 0, 64);

    if (rdev->family < CHIP_R600) {
        if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL))
            atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps);
    }
    return ret;
}
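atom_asic_init now keeps the ASIC_Init result and, on pre-R600 ASICs, additionally runs the SpeedFanControl table (ATOM_CMD_SPDFANCNTL, index 0x39, added to atom.h below). The CU16(ctx->cmd_table + 4 + 2 * index) idiom it uses probes whether a command table exists at all: the master command table stores one 16-bit offset per table after its 4-byte common header, and a zero offset means the VBIOS does not implement that table. As a sketch (the helper name is illustrative, not from the source):

static inline int atom_table_present(struct atom_context *ctx, int index)
{
    /* skip the 4-byte ATOM_COMMON_TABLE_HEADER; entries are 16-bit offsets */
    return CU16(ctx->cmd_table + 4 + 2 * index) != 0;
}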
 
void atom_destroy(struct atom_context *ctx)
{
1353,6 → 1392,7
 
        usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
    }
    ctx->scratch_size_bytes = 0;
    if (usage_bytes == 0)
        usage_bytes = 20 * 1024;
    /* allocate some scratch memory */
1359,5 → 1399,6
    ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
    if (!ctx->scratch)
        return -ENOMEM;
    ctx->scratch_size_bytes = usage_bytes;
    return 0;
}
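These two hunks back the bounds checks added earlier: scratch_size_bytes starts at 0 and is set only once kzalloc succeeds, so ATOM_ARG_FB accesses made before (or without) a scratch allocation fail safe instead of dereferencing a missing buffer:

/* e.g. a write to FB[1] while scratch_size_bytes == 0:
 *   gctx->fb_base + (1 * 4) > 0  -> guard fires, the write is dropped
 *   (reads in the same situation yield 0)
 */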
/drivers/video/drm/radeon/atom.h
26,7 → 26,7
#define ATOM_H
 
#include <linux/types.h>
#include "drmP.h"
#include <drm/drmP.h>
 
#define ATOM_BIOS_MAGIC 0xAA55
#define ATOM_ATI_MAGIC_PTR 0x30
44,6 → 44,7
#define ATOM_CMD_SETSCLK 0x0A
#define ATOM_CMD_SETMCLK 0x0B
#define ATOM_CMD_SETPCLK 0x0C
#define ATOM_CMD_SPDFANCNTL 0x39
 
#define ATOM_DATA_FWI_PTR 0xC
#define ATOM_DATA_IIO_PTR 0x32
137,6 → 138,7
int cs_equal, cs_above;
int io_mode;
uint32_t *scratch;
int scratch_size_bytes;
};
 
extern int atom_debug;
/drivers/video/drm/radeon/atombios.h
101,6 → 101,7
#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
#define ATOM_INIT (ATOM_DISABLE+7)
#define ATOM_GET_STATUS (ATOM_DISABLE+8)
 
#define ATOM_BLANKING 1
251,25 → 252,25
USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
USHORT EnableDispPowerGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT MemoryPLLInit;
USHORT AdjustDisplayPll; //only used by Bios
USHORT MemoryPLLInit; //Atomic Table, used only by Bios
USHORT AdjustDisplayPll; //Atomic Table, used by various SW components.
USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT HW_Misc_Operation; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT CV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
USHORT GetConditionalGoldenSetting; //only used by Bios
USHORT GetConditionalGoldenSetting; //Only used by Bios
USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
USHORT PatchMCSetting; //only used by BIOS
USHORT MC_SEQ_Control; //only used by BIOS
USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
USHORT EnableScaler; //Atomic Table, used only by Bios
USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
282,7 → 283,7
USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
USHORT UpdateCRTC_DoubleBufferRegisters;
USHORT UpdateCRTC_DoubleBufferRegisters; //Atomic Table, used only by Bios
USHORT LUT_AutoFill; //Atomic Table, only used by Bios
USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
308,10 → 309,10
USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
USHORT ComputeMemoryClockParam; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
USHORT GetDispObjectInfo; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
318,10 → 319,12
USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
USHORT DPEncoderService; //Function Table,only used by Bios
USHORT GetVoltageInfo; //Function Table,only used by Bios since SI
}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
 
// For backward compatibility
#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
#define DPTranslatorControl DIG2EncoderControl
#define UNIPHYTransmitterControl DIG1TransmitterControl
#define LVTMATransmitterControl DIG2TransmitterControl
#define SetCRTC_DPM_State GetConditionalGoldenSetting
328,8 → 331,15
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
#define HPDInterruptService ReadHWAssistedI2CStatus
#define EnableVGA_Access GetSCLKOverMCLKRatio
#define GetDispObjectInfo EnableYUV
#define EnableYUV GetDispObjectInfo
#define DynamicClockGating EnableDispPowerGating
#define SetupHWAssistedI2CStatus ComputeMemoryClockParam
 
#define TMDSAEncoderControl PatchMCSetting
#define LVDSEncoderControl MC_SEQ_Control
#define LCD1OutputControl HW_Misc_Operation
 
 
typedef struct _ATOM_MASTER_COMMAND_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
495,6 → 505,34
// ucInputFlag
#define ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN 1 // 1-StrobeMode, 0-PerformanceMode
 
// used for ComputeMemoryClockParamTable
typedef struct _COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1
{
union
{
ULONG ulClock;
ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output:UPPER_WORD=FB_DIV_INTEGER, LOWER_WORD=FB_DIV_FRAC shl (16-FB_FRACTION_BITS)
};
UCHAR ucDllSpeed; //Output
UCHAR ucPostDiv; //Output
union{
UCHAR ucInputFlag; //Input : ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN: 1-StrobeMode, 0-PerformanceMode
UCHAR ucPllCntlFlag; //Output:
};
UCHAR ucBWCntl;
}COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1;
 
// definition of ucInputFlag
#define MPLL_INPUT_FLAG_STROBE_MODE_EN 0x01
// definition of ucPllCntlFlag
#define MPLL_CNTL_FLAG_VCO_MODE_MASK 0x03
#define MPLL_CNTL_FLAG_BYPASS_DQ_PLL 0x04
#define MPLL_CNTL_FLAG_QDR_ENABLE 0x08
#define MPLL_CNTL_FLAG_AD_HALF_RATE 0x10
 
//MPLL_CNTL_FLAG_BYPASS_AD_PLL has a wrong name, should be BYPASS_DQ_PLL
#define MPLL_CNTL_FLAG_BYPASS_AD_PLL 0x04
 
typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
{
ATOM_COMPUTE_CLOCK_FREQ ulClock;
562,6 → 600,16
#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
 
/****************************************************************************/
// Structure used by EnableDispPowerGatingTable.ctb
/****************************************************************************/
typedef struct _ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1
{
UCHAR ucDispPipeId; // ATOM_CRTC1, ATOM_CRTC2, ...
UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
UCHAR ucPadding[2];
}ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1;
 
/****************************************************************************/
// Structure used by EnableASIC_StaticPwrMgtTable.ctb
/****************************************************************************/
typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
807,6 → 855,7
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ 0x01
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ 0x02
#define ATOM_ENCODER_CONFIG_V4_DPLINKRATE_3_24GHZ 0x03
#define ATOM_ENCODER_CONFIG_V4_ENCODER_SEL 0x70
#define ATOM_ENCODER_CONFIG_V4_DIG0_ENCODER 0x00
#define ATOM_ENCODER_CONFIG_V4_DIG1_ENCODER 0x10
814,6 → 863,7
#define ATOM_ENCODER_CONFIG_V4_DIG3_ENCODER 0x30
#define ATOM_ENCODER_CONFIG_V4_DIG4_ENCODER 0x40
#define ATOM_ENCODER_CONFIG_V4_DIG5_ENCODER 0x50
#define ATOM_ENCODER_CONFIG_V4_DIG6_ENCODER 0x60
 
typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V4
{
1171,6 → 1221,106
#define ATOM_TRANSMITTER_CONFIG_V4_TRANSMITTER3 0x80 //EF
 
 
typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V5
{
#if ATOM_BIG_ENDIAN
UCHAR ucReservd1:1;
UCHAR ucHPDSel:3;
UCHAR ucPhyClkSrcId:2;
UCHAR ucCoherentMode:1;
UCHAR ucReserved:1;
#else
UCHAR ucReserved:1;
UCHAR ucCoherentMode:1;
UCHAR ucPhyClkSrcId:2;
UCHAR ucHPDSel:3;
UCHAR ucReservd1:1;
#endif
}ATOM_DIG_TRANSMITTER_CONFIG_V5;
 
typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
{
USHORT usSymClock; // Encoder Clock in 10kHz,(DP mode)= linkclock/10, (TMDS/LVDS/HDMI)= pixel clock, (HDMI deep color), =pixel clock * deep_color_ratio
UCHAR ucPhyId; // 0=UNIPHYA, 1=UNIPHYB, 2=UNIPHYC, 3=UNIPHYD, 4= UNIPHYE 5=UNIPHYF
UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_xxx
UCHAR ucLaneNum; // indicate lane number 1-8
UCHAR ucConnObjId; // Connector Object Id defined in ObjectId.h
UCHAR ucDigMode; // indicate DIG mode
union{
ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
UCHAR ucConfig;
};
UCHAR ucDigEncoderSel; // indicate DIG front end encoder
UCHAR ucDPLaneSet;
UCHAR ucReserved;
UCHAR ucReserved1;
}DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5;
 
//ucPhyId
#define ATOM_PHY_ID_UNIPHYA 0
#define ATOM_PHY_ID_UNIPHYB 1
#define ATOM_PHY_ID_UNIPHYC 2
#define ATOM_PHY_ID_UNIPHYD 3
#define ATOM_PHY_ID_UNIPHYE 4
#define ATOM_PHY_ID_UNIPHYF 5
#define ATOM_PHY_ID_UNIPHYG 6
 
// ucDigEncoderSel
#define ATOM_TRANMSITTER_V5__DIGA_SEL 0x01
#define ATOM_TRANMSITTER_V5__DIGB_SEL 0x02
#define ATOM_TRANMSITTER_V5__DIGC_SEL 0x04
#define ATOM_TRANMSITTER_V5__DIGD_SEL 0x08
#define ATOM_TRANMSITTER_V5__DIGE_SEL 0x10
#define ATOM_TRANMSITTER_V5__DIGF_SEL 0x20
#define ATOM_TRANMSITTER_V5__DIGG_SEL 0x40
 
// ucDigMode
#define ATOM_TRANSMITTER_DIGMODE_V5_DP 0
#define ATOM_TRANSMITTER_DIGMODE_V5_LVDS 1
#define ATOM_TRANSMITTER_DIGMODE_V5_DVI 2
#define ATOM_TRANSMITTER_DIGMODE_V5_HDMI 3
#define ATOM_TRANSMITTER_DIGMODE_V5_SDVO 4
#define ATOM_TRANSMITTER_DIGMODE_V5_DP_MST 5
 
// ucDPLaneSet
#define DP_LANE_SET__0DB_0_4V 0x00
#define DP_LANE_SET__0DB_0_6V 0x01
#define DP_LANE_SET__0DB_0_8V 0x02
#define DP_LANE_SET__0DB_1_2V 0x03
#define DP_LANE_SET__3_5DB_0_4V 0x08
#define DP_LANE_SET__3_5DB_0_6V 0x09
#define DP_LANE_SET__3_5DB_0_8V 0x0a
#define DP_LANE_SET__6DB_0_4V 0x10
#define DP_LANE_SET__6DB_0_6V 0x11
#define DP_LANE_SET__9_5DB_0_4V 0x18
 
// ATOM_DIG_TRANSMITTER_CONFIG_V5 asConfig;
// Bit1
#define ATOM_TRANSMITTER_CONFIG_V5_COHERENT 0x02
 
// Bit3:2
#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_MASK 0x0c
#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SEL_SHIFT 0x02
 
#define ATOM_TRANSMITTER_CONFIG_V5_P1PLL 0x00
#define ATOM_TRANSMITTER_CONFIG_V5_P2PLL 0x04
#define ATOM_TRANSMITTER_CONFIG_V5_P0PLL 0x08
#define ATOM_TRANSMITTER_CONFIG_V5_REFCLK_SRC_EXT 0x0c
// Bit6:4
#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_MASK 0x70
#define ATOM_TRANSMITTER_CONFIG_V5_HPD_SEL_SHIFT 0x04
 
#define ATOM_TRANSMITTER_CONFIG_V5_NO_HPD_SEL 0x00
#define ATOM_TRANSMITTER_CONFIG_V5_HPD1_SEL 0x10
#define ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL 0x20
#define ATOM_TRANSMITTER_CONFIG_V5_HPD3_SEL 0x30
#define ATOM_TRANSMITTER_CONFIG_V5_HPD4_SEL 0x40
#define ATOM_TRANSMITTER_CONFIG_V5_HPD5_SEL 0x50
#define ATOM_TRANSMITTER_CONFIG_V5_HPD6_SEL 0x60
 
#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION_V1_5 DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5
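Since ucConfig and the ATOM_DIG_TRANSMITTER_CONFIG_V5 bitfield share a union, the defines above are pre-shifted field values that can be ORed together. A worked example (an illustrative combination, not from this revision):

/* ucConfig = ATOM_TRANSMITTER_CONFIG_V5_COHERENT   (0x02)
 *          | ATOM_TRANSMITTER_CONFIG_V5_P2PLL      (0x04)
 *          | ATOM_TRANSMITTER_CONFIG_V5_HPD2_SEL   (0x20)   == 0x26
 * seen through asConfig (little-endian layout):
 *   ucCoherentMode = 1, ucPhyClkSrcId = 1 (P2PLL), ucHPDSel = 2 (HPD2)
 */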
 
 
/****************************************************************************/
// Structures used by ExternalEncoderControlTable V1.3
// ASIC Families: Evergreen, Llano, NI
1793,6 → 1943,7
#define ATOM_PPLL_SS_TYPE_V3_P1PLL 0x00
#define ATOM_PPLL_SS_TYPE_V3_P2PLL 0x04
#define ATOM_PPLL_SS_TYPE_V3_DCPLL 0x08
#define ATOM_PPLL_SS_TYPE_V3_P0PLL ATOM_PPLL_SS_TYPE_V3_DCPLL
#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_MASK 0x00FF
#define ATOM_PPLL_SS_AMOUNT_V3_FBDIV_SHIFT 0
#define ATOM_PPLL_SS_AMOUNT_V3_NFRAC_MASK 0x0F00
2030,6 → 2181,33
USHORT usVoltageLevel; // real voltage level
}SET_VOLTAGE_PARAMETERS_V2;
 
 
typedef struct _SET_VOLTAGE_PARAMETERS_V1_3
{
UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
UCHAR ucVoltageMode; // Indicate action: Set voltage level
USHORT usVoltageLevel; // real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. )
}SET_VOLTAGE_PARAMETERS_V1_3;
 
//ucVoltageType
#define VOLTAGE_TYPE_VDDC 1
#define VOLTAGE_TYPE_MVDDC 2
#define VOLTAGE_TYPE_MVDDQ 3
#define VOLTAGE_TYPE_VDDCI 4
 
//SET_VOLTAGE_PARAMETERS_V3.ucVoltageMode
#define ATOM_SET_VOLTAGE 0 //Set voltage Level
#define ATOM_INIT_VOLTAGE_REGULATOR 3 //Init Regulator
#define ATOM_SET_VOLTAGE_PHASE 4 //Set Vregulator Phase
#define ATOM_GET_MAX_VOLTAGE 6 //Get Max Voltage, not used in SetVoltageTable v1.3
#define ATOM_GET_VOLTAGE_LEVEL 6 //Get Voltage level from virtual voltage ID
 
// define virtual voltage id in usVoltageLevel
#define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
#define ATOM_VIRTUAL_VOLTAGE_ID1 0xff02
#define ATOM_VIRTUAL_VOLTAGE_ID2 0xff03
#define ATOM_VIRTUAL_VOLTAGE_ID3 0xff04
 
typedef struct _SET_VOLTAGE_PS_ALLOCATION
{
SET_VOLTAGE_PARAMETERS sASICSetVoltage;
2036,6 → 2214,44
WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
}SET_VOLTAGE_PS_ALLOCATION;
 
// Newly added from SI for GetVoltageInfoTable, input parameter structure
typedef struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1
{
UCHAR ucVoltageType; // Input: To tell which voltage to set up, VDDC/MVDDC/MVDDQ/VDDCI
UCHAR ucVoltageMode; // Input: Indicate action: Get voltage info
USHORT usVoltageLevel; // Input: real voltage level in unit of mv or Voltage Phase (0, 1, 2, .. ) or Leakage Id
ULONG ulReserved;
}GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_1;
 
// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_VID
typedef struct _GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
{
ULONG ulVotlageGpioState;
ULONG ulVoltageGPioMask;
}GET_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
 
// Newly added from SI for GetVoltageInfoTable, output parameter structure when ucVoltageMode == ATOM_GET_VOLTAGE_STATEx_LEAKAGE_VID
typedef struct _GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1
{
USHORT usVoltageLevel;
USHORT usVoltageId; // Voltage Id programmed in Voltage Regulator
ULONG ulReseved;
}GET_LEAKAGE_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_1;
 
 
// GetVoltageInfo v1.1 ucVoltageMode
#define ATOM_GET_VOLTAGE_VID 0x00
#define ATOM_GET_VOTLAGE_INIT_SEQ 0x03
#define ATOM_GET_VOLTTAGE_PHASE_PHASE_VID 0x04
// for SI, this state maps to the 0xff02 voltage state in the Power Play table, which is the power boost state
#define ATOM_GET_VOLTAGE_STATE0_LEAKAGE_VID 0x10
 
// for SI, this state maps to the 0xff01 voltage state in the Power Play table, which is the performance state
#define ATOM_GET_VOLTAGE_STATE1_LEAKAGE_VID 0x11
// undefined power state
#define ATOM_GET_VOLTAGE_STATE2_LEAKAGE_VID 0x12
#define ATOM_GET_VOLTAGE_STATE3_LEAKAGE_VID 0x13
 
/****************************************************************************/
// Structures used by TVEncoderControlTable
/****************************************************************************/
2065,9 → 2281,9
USHORT MultimediaConfigInfo; // Only used by MM Lib,latest version 2.1, not configurable from Bios, need to include the table to build Bios
USHORT StandardVESA_Timing; // Only used by Bios
USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
USHORT DAC_Info; // Will be obsolete from R600
USHORT PaletteData; // Only used by BIOS
USHORT LCD_Info; // Shared by various SW components,latest version 1.3, was called LVDS_Info
USHORT TMDS_Info; // Will be obsolete from R600
USHORT DIGTransmitterInfo; // Internal used by VBIOS only version 3.1
USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
USHORT SupportedDevicesInfo; // Will be obsolete from R600
USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
2096,9 → 2312,6
USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
}ATOM_MASTER_LIST_OF_DATA_TABLES;
 
// For backward compatibility
#define LVDS_Info LCD_Info
 
typedef struct _ATOM_MASTER_DATA_TABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
2105,6 → 2318,10
ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
}ATOM_MASTER_DATA_TABLE;
 
// For backward compatibility
#define LVDS_Info LCD_Info
#define DAC_Info PaletteData
#define TMDS_Info DIGTransmitterInfo
 
/****************************************************************************/
// Structure used in MultimediaCapabilityInfoTable
2171,7 → 2388,9
typedef struct _ATOM_FIRMWARE_CAPABILITY
{
#if ATOM_BIG_ENDIAN
USHORT Reserved:3;
USHORT Reserved:1;
USHORT SCL2Redefined:1;
USHORT PostWithoutModeSet:1;
USHORT HyperMemory_Size:4;
USHORT HyperMemory_Support:1;
USHORT PPMode_Assigned:1;
2193,7 → 2412,9
USHORT PPMode_Assigned:1;
USHORT HyperMemory_Support:1;
USHORT HyperMemory_Size:4;
USHORT Reserved:3;
USHORT PostWithoutModeSet:1;
USHORT SCL2Redefined:1;
USHORT Reserved:1;
#endif
}ATOM_FIRMWARE_CAPABILITY;
 
2418,7 → 2639,8
USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
ULONG ulReserved4; //Was ulAsicMaximumVoltage
ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
ULONG ulReserved5; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
UCHAR ucRemoteDisplayConfig;
UCHAR ucReserved5[3]; //Was usMinEngineClockPLL_Input and usMaxEngineClockPLL_Input
ULONG ulReserved6; //Was usMinEngineClockPLL_Output and usMinMemoryClockPLL_Input
ULONG ulReserved7; //Was usMaxMemoryClockPLL_Input and usMinMemoryClockPLL_Output
USHORT usReserved11; //Was usMaxPixelClock; //In 10Khz unit, Max. Pclk used only for DAC
2438,6 → 2660,11
 
#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_2
 
 
// definition of ucRemoteDisplayConfig
#define REMOTE_DISPLAY_DISABLE 0x00
#define REMOTE_DISPLAY_ENABLE 0x01
 
/****************************************************************************/
// Structures used in IntegratedSystemInfoTable
/****************************************************************************/
2660,8 → 2887,9
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__GREYHOUND 2
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__K8 3
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH 4
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI 5
 
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__PHARAOH // this define reflects the max defined CPU code
#define INTEGRATED_SYSTEM_INFO__AMD_CPU__MAX_CODE INTEGRATED_SYSTEM_INFO__AMD_CPU__OROCHI // this define reflects the max defined CPU code
 
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
2753,6 → 2981,7
#define ASIC_INT_DIG4_ENCODER_ID 0x0b
#define ASIC_INT_DIG5_ENCODER_ID 0x0c
#define ASIC_INT_DIG6_ENCODER_ID 0x0d
#define ASIC_INT_DIG7_ENCODER_ID 0x0e
 
//define Encoder attribute
#define ATOM_ANALOG_ENCODER 0
3226,8 → 3455,8
 
UCHAR ucPowerSequenceDIGONtoDE_in4Ms;
UCHAR ucPowerSequenceDEtoVARY_BL_in4Ms;
UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
UCHAR ucPowerSequenceDEtoDIGON_in4Ms;
UCHAR ucPowerSequenceVARY_BLtoDE_in4Ms;
 
UCHAR ucOffDelay_in4Ms;
UCHAR ucPowerSequenceVARY_BLtoBLON_in4Ms;
3234,7 → 3463,15
UCHAR ucPowerSequenceBLONtoVARY_BL_in4Ms;
UCHAR ucReserved1;
 
ULONG ulReserved[4];
UCHAR ucDPCD_eDP_CONFIGURATION_CAP; // dpcd 0dh
UCHAR ucDPCD_MAX_LINK_RATE; // dpcd 01h
UCHAR ucDPCD_MAX_LANE_COUNT; // dpcd 02h
UCHAR ucDPCD_MAX_DOWNSPREAD; // dpcd 03h
 
USHORT usMaxPclkFreqInSingleLink; // Max PixelClock frequency in single link mode.
UCHAR uceDPToLVDSRxId;
UCHAR ucLcdReservd;
ULONG ulReserved[2];
}ATOM_LCD_INFO_V13;
 
#define ATOM_LCD_INFO_LAST ATOM_LCD_INFO_V13
3273,6 → 3510,11
//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
#define LCDPANEL_CAP_V13_eDP 0x4 // = LCDPANEL_CAP_eDP no change comparing to previous version
 
//uceDPToLVDSRxId
#define eDP_TO_LVDS_RX_DISABLE 0x00 // no eDP->LVDS translator chip
#define eDP_TO_LVDS_COMMON_ID 0x01 // common eDP->LVDS translator chip without AMD SW init
#define eDP_TO_LVDS_RT_ID 0x02 // RT translator which requires AMD SW init
 
typedef struct _ATOM_PATCH_RECORD_MODE
{
UCHAR ucRecordType;
3317,6 → 3559,7
#define LCD_CAP_RECORD_TYPE 3
#define LCD_FAKE_EDID_PATCH_RECORD_TYPE 4
#define LCD_PANEL_RESOLUTION_RECORD_TYPE 5
#define LCD_EDID_OFFSET_PATCH_RECORD_TYPE 6
#define ATOM_RECORD_END_TYPE 0xFF
 
/****************************Spread Spectrum Info Table Definitions **********************/
3528,6 → 3771,7
 
CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in non VGA case.*/
 
/***********************************************************************************/
#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
 
typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
3818,13 → 4062,17
ATOM_DP_CONN_CHANNEL_MAPPING asDPMapping;
ATOM_DVI_CONN_CHANNEL_MAPPING asDVIMapping;
};
UCHAR ucReserved;
USHORT usReserved[2];
UCHAR ucChPNInvert; // bit vector for up to 8 lanes, =0: P and N are not inverted, =1: P and N are inverted
USHORT usCaps;
USHORT usReserved;
}EXT_DISPLAY_PATH;
#define NUMBER_OF_UCHAR_FOR_GUID 16
#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
 
//usCaps
#define EXT_DISPLAY_PATH_CAPS__HBR2_DISABLE 0x01
 
typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
{
ATOM_COMMON_TABLE_HEADER sHeader;
3832,7 → 4080,9
EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
UCHAR ucChecksum; // a simple checksum: the sum over the whole structure equals 0x0.
UCHAR uc3DStereoPinId; // use for eDP panel
UCHAR Reserved [6]; // for potential expansion
UCHAR ucRemoteDisplayConfig;
UCHAR uceDPToLVDSRxId;
UCHAR Reserved[4]; // for potential expansion
}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
 
//Related definitions, all records are different but they have a common header
3977,6 → 4227,7
#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
 
// Indexes to GPIO array in GLSync record
// GLSync record is for Frame Lock/Gen Lock feature.
#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
3984,7 → 4235,9
#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
#define ATOM_GPIO_INDEX_GLSYNC_SWAP_CNTL 7
#define ATOM_GPIO_INDEX_GLSYNC_SWAP_SEL 8
#define ATOM_GPIO_INDEX_GLSYNC_MAX 9
 
typedef struct _ATOM_ENCODER_DVO_CF_RECORD
{
3994,7 → 4247,8
}ATOM_ENCODER_DVO_CF_RECORD;
 
// Bit maps for ATOM_ENCODER_CAP_RECORD.ucEncoderCap
#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by this path
#define ATOM_ENCODER_CAP_RECORD_HBR2 0x01 // DP1.2 HBR2 is supported by HW encoder
#define ATOM_ENCODER_CAP_RECORD_HBR2_EN 0x02 // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
 
typedef struct _ATOM_ENCODER_CAP_RECORD
{
4003,11 → 4257,13
USHORT usEncoderCap;
struct {
#if ATOM_BIG_ENDIAN
USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
#else
USHORT usHBR2Cap:1; // Bit0 is for DP1.2 HBR2 capability.
USHORT usReserved:15; // Bit1-15 may be defined for other capability in future
USHORT usHBR2En:1; // Bit1 is for DP1.2 HBR2 enable
USHORT usReserved:14; // Bit1-15 may be defined for other capability in future
#endif
};
};
4157,6 → 4413,7
#define VOLTAGE_CONTROL_ID_VT1556M 0x07
#define VOLTAGE_CONTROL_ID_CHL822x 0x08
#define VOLTAGE_CONTROL_ID_VT1586M 0x09
#define VOLTAGE_CONTROL_ID_UP1637 0x0A
 
typedef struct _ATOM_VOLTAGE_OBJECT
{
4193,6 → 4450,69
USHORT usVoltage;
}ATOM_LEAKID_VOLTAGE;
 
typedef struct _ATOM_VOLTAGE_OBJECT_HEADER_V3{
UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
UCHAR ucVoltageMode; //Indicate voltage control mode: Init/Set/Leakage/Set phase
USHORT usSize; //Size of Object
}ATOM_VOLTAGE_OBJECT_HEADER_V3;
 
typedef struct _VOLTAGE_LUT_ENTRY_V2
{
ULONG ulVoltageId; // The Voltage ID which is used to program GPIO register
USHORT usVoltageValue; // The corresponding Voltage Value, in mV
}VOLTAGE_LUT_ENTRY_V2;
 
typedef struct _LEAKAGE_VOLTAGE_LUT_ENTRY_V2
{
USHORT usVoltageLevel; // The Voltage ID which is used to program GPIO register
USHORT usVoltageId;
USHORT usLeakageId; // The corresponding Voltage Value, in mV
}LEAKAGE_VOLTAGE_LUT_ENTRY_V2;
 
typedef struct _ATOM_I2C_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucVoltageRegulatorId; //Indicate Voltage Regulator Id
UCHAR ucVoltageControlI2cLine;
UCHAR ucVoltageControlAddress;
UCHAR ucVoltageControlOffset;
ULONG ulReserved;
VOLTAGE_LUT_ENTRY asVolI2cLut[1]; // end with 0xff
}ATOM_I2C_VOLTAGE_OBJECT_V3;
 
typedef struct _ATOM_GPIO_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucVoltageGpioCntlId; // default is 0, which indicates control through CG VID mode
UCHAR ucGpioEntryNum; // indicates the entry number of the Voltage/GPIO value lookup table
UCHAR ucPhaseDelay; // phase delay in units of microseconds
UCHAR ucReserved;
ULONG ulGpioMaskVal; // GPIO Mask value
VOLTAGE_LUT_ENTRY_V2 asVolGpioLut[1];
}ATOM_GPIO_VOLTAGE_OBJECT_V3;
 
typedef struct _ATOM_LEAKAGE_VOLTAGE_OBJECT_V3
{
ATOM_VOLTAGE_OBJECT_HEADER_V3 sHeader;
UCHAR ucLeakageCntlId; // default is 0
UCHAR ucLeakageEntryNum; // indicate the entry number of LeakageId/Voltage Lut table
UCHAR ucReserved[2];
ULONG ulMaxVoltageLevel;
LEAKAGE_VOLTAGE_LUT_ENTRY_V2 asLeakageIdLut[1];
}ATOM_LEAKAGE_VOLTAGE_OBJECT_V3;
 
typedef union _ATOM_VOLTAGE_OBJECT_V3{
ATOM_GPIO_VOLTAGE_OBJECT_V3 asGpioVoltageObj;
ATOM_I2C_VOLTAGE_OBJECT_V3 asI2cVoltageObj;
ATOM_LEAKAGE_VOLTAGE_OBJECT_V3 asLeakageObj;
}ATOM_VOLTAGE_OBJECT_V3;
 
typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1
{
ATOM_COMMON_TABLE_HEADER sHeader;
ATOM_VOLTAGE_OBJECT_V3 asVoltageObj[3]; //Info for Voltage control
}ATOM_VOLTAGE_OBJECT_INFO_V3_1;
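The V3 voltage objects are variable-sized records packed back to back inside ATOM_VOLTAGE_OBJECT_INFO_V3_1 (the asVoltageObj[3] array bound is nominal), so usSize in each header is what advances a walk. A lookup might go roughly like this, a sketch assuming info_size comes from sHeader.usStructureSize, not code from this revision:

static ATOM_VOLTAGE_OBJECT_HEADER_V3 *
atom_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *info,
                              USHORT info_size, UCHAR type, UCHAR mode)
{
    USHORT offset = sizeof(ATOM_COMMON_TABLE_HEADER);

    while (offset + sizeof(ATOM_VOLTAGE_OBJECT_HEADER_V3) <= info_size) {
        ATOM_VOLTAGE_OBJECT_HEADER_V3 *obj =
            (ATOM_VOLTAGE_OBJECT_HEADER_V3 *)((UCHAR *)info + offset);
        if (obj->ucVoltageType == type && obj->ucVoltageMode == mode)
            return obj;
        if (obj->usSize == 0)      /* malformed table: stop walking */
            break;
        offset += obj->usSize;     /* records are packed back to back */
    }
    return NULL;
}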
 
typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
{
UCHAR ucProfileId;
4305,7 → 4625,18
USHORT usHDMISSpreadRateIn10Hz;
USHORT usDVISSPercentage;
USHORT usDVISSpreadRateIn10Hz;
ULONG ulReserved3[21];
ULONG SclkDpmBoostMargin;
ULONG SclkDpmThrottleMargin;
USHORT SclkDpmTdpLimitPG;
USHORT SclkDpmTdpLimitBoost;
ULONG ulBoostEngineCLock;
UCHAR ulBoostVid_2bit;
UCHAR EnableBoost;
USHORT GnbTdpLimit;
USHORT usMaxLVDSPclkFreqInSingleLink;
UCHAR ucLvdsMisc;
UCHAR ucLVDSReserved;
ULONG ulReserved3[15];
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V6;
 
4313,9 → 4644,16
#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define INTEGRATED_SYSTEM_INFO_V6_GPUCAPINFO__DISABLE_AUX_HW_MODE_DETECTION 0x08
 
// ulOtherDisplayMisc
#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
//ucLVDSMisc:
#define SYS_INFO_LVDSMISC__888_FPDI_MODE 0x01
#define SYS_INFO_LVDSMISC__DL_CH_SWAP 0x02
#define SYS_INFO_LVDSMISC__888_BPC 0x04
#define SYS_INFO_LVDSMISC__OVERRIDE_EN 0x08
#define SYS_INFO_LVDSMISC__BLON_ACTIVE_LOW 0x10
 
// not used any more
#define SYS_INFO_LVDSMISC__VSYNC_ACTIVE_LOW 0x04
#define SYS_INFO_LVDSMISC__HSYNC_ACTIVE_LOW 0x08
 
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
4384,12 → 4722,213
ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, order from low to high
ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 means 1%.
usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
usLvdsSSpreadRateIn10Hz: LVDS panel ( not include eDP ) Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usHDMISSPercentage: HDMI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usMaxLVDSPclkFreqInSingleLink: Max pixel clock of LVDS panel in single link, if=0 means VBIOS uses the default threshold, right now it is 85 MHz
ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
[bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
[bit2] LVDS 888bit per color mode =0: 666 bit per color =1: 888 bit per color
[bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
[bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
**********************************************************************************************************************/
 
// this table is used for Llano/Ontario APUs
typedef struct _ATOM_FUSION_SYSTEM_INFO_V1
{
ATOM_INTEGRATED_SYSTEM_INFO_V6 sIntegratedSysInfo;
ULONG ulPowerplayTable[128];
}ATOM_FUSION_SYSTEM_INFO_V1;
/**********************************************************************************************************************
ATOM_FUSION_SYSTEM_INFO_V1 Description
sIntegratedSysInfo: refer to ATOM_INTEGRATED_SYSTEM_INFO_V6 definition.
ulPowerplayTable[128]: This 512-byte memory is used to save ATOM_PPLIB_POWERPLAYTABLE3, starting from ulPowerplayTable[0]
**********************************************************************************************************************/
 
// this IntegrateSystemInfoTable is used for Trinity APU
typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7
{
ATOM_COMMON_TABLE_HEADER sHeader;
ULONG ulBootUpEngineClock;
ULONG ulDentistVCOFreq;
ULONG ulBootUpUMAClock;
ATOM_CLK_VOLT_CAPABILITY sDISPCLK_Voltage[4];
ULONG ulBootUpReqDisplayVector;
ULONG ulOtherDisplayMisc;
ULONG ulGPUCapInfo;
ULONG ulSB_MMIO_Base_Addr;
USHORT usRequestedPWMFreqInHz;
UCHAR ucHtcTmpLmt;
UCHAR ucHtcHystLmt;
ULONG ulMinEngineClock;
ULONG ulSystemConfig;
ULONG ulCPUCapInfo;
USHORT usNBP0Voltage;
USHORT usNBP1Voltage;
USHORT usBootUpNBVoltage;
USHORT usExtDispConnInfoOffset;
USHORT usPanelRefreshRateRange;
UCHAR ucMemoryType;
UCHAR ucUMAChannelNumber;
UCHAR strVBIOSMsg[40];
ULONG ulReserved[20];
ATOM_AVAILABLE_SCLK_LIST sAvail_SCLK[5];
ULONG ulGMCRestoreResetTime;
ULONG ulMinimumNClk;
ULONG ulIdleNClk;
ULONG ulDDR_DLL_PowerUpTime;
ULONG ulDDR_PLL_PowerUpTime;
USHORT usPCIEClkSSPercentage;
USHORT usPCIEClkSSType;
USHORT usLvdsSSPercentage;
USHORT usLvdsSSpreadRateIn10Hz;
USHORT usHDMISSPercentage;
USHORT usHDMISSpreadRateIn10Hz;
USHORT usDVISSPercentage;
USHORT usDVISSpreadRateIn10Hz;
ULONG SclkDpmBoostMargin;
ULONG SclkDpmThrottleMargin;
USHORT SclkDpmTdpLimitPG;
USHORT SclkDpmTdpLimitBoost;
ULONG ulBoostEngineCLock;
UCHAR ulBoostVid_2bit;
UCHAR EnableBoost;
USHORT GnbTdpLimit;
USHORT usMaxLVDSPclkFreqInSingleLink;
UCHAR ucLvdsMisc;
UCHAR ucLVDSReserved;
UCHAR ucLVDSPwrOnSeqDIGONtoDE_in4Ms;
UCHAR ucLVDSPwrOnSeqDEtoVARY_BL_in4Ms;
UCHAR ucLVDSPwrOffSeqVARY_BLtoDE_in4Ms;
UCHAR ucLVDSPwrOffSeqDEtoDIGON_in4Ms;
UCHAR ucLVDSOffToOnDelay_in4Ms;
UCHAR ucLVDSPwrOnSeqVARY_BLtoBLON_in4Ms;
UCHAR ucLVDSPwrOffSeqBLONtoVARY_BL_in4Ms;
UCHAR ucLVDSReserved1;
ULONG ulLCDBitDepthControlVal;
ULONG ulNbpStateMemclkFreq[4];
USHORT usNBP2Voltage;
USHORT usNBP3Voltage;
ULONG ulNbpStateNClkFreq[4];
UCHAR ucNBDPMEnable;
UCHAR ucReserved[3];
UCHAR ucDPMState0VclkFid;
UCHAR ucDPMState0DclkFid;
UCHAR ucDPMState1VclkFid;
UCHAR ucDPMState1DclkFid;
UCHAR ucDPMState2VclkFid;
UCHAR ucDPMState2DclkFid;
UCHAR ucDPMState3VclkFid;
UCHAR ucDPMState3DclkFid;
ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
}ATOM_INTEGRATED_SYSTEM_INFO_V1_7;
 
// ulOtherDisplayMisc
#define INTEGRATED_SYSTEM_INFO__GET_EDID_CALLBACK_FUNC_SUPPORT 0x01
#define INTEGRATED_SYSTEM_INFO__GET_BOOTUP_DISPLAY_CALLBACK_FUNC_SUPPORT 0x02
#define INTEGRATED_SYSTEM_INFO__GET_EXPANSION_CALLBACK_FUNC_SUPPORT 0x04
#define INTEGRATED_SYSTEM_INFO__FAST_BOOT_SUPPORT 0x08
 
// ulGPUCapInfo
#define SYS_INFO_GPUCAPS__TMDSHDMI_COHERENT_SINGLEPLL_MODE 0x01
#define SYS_INFO_GPUCAPS__DP_SINGLEPLL_MODE 0x02
#define SYS_INFO_GPUCAPS__DISABLE_AUX_MODE_DETECT 0x08
 
/**********************************************************************************************************************
ATOM_INTEGRATED_SYSTEM_INFO_V1_7 Description
ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit. If it is 0, VBIOS uses a pre-defined bootup engine clock
ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
sDISPCLK_Voltage: Report Display clock voltage requirement.
ulBootUpReqDisplayVector: VBIOS boot up display IDs, following are supported devices in Trinity projects:
ATOM_DEVICE_CRT1_SUPPORT 0x0001
ATOM_DEVICE_DFP1_SUPPORT 0x0008
ATOM_DEVICE_DFP6_SUPPORT 0x0040
ATOM_DEVICE_DFP2_SUPPORT 0x0080
ATOM_DEVICE_DFP3_SUPPORT 0x0200
ATOM_DEVICE_DFP4_SUPPORT 0x0400
ATOM_DEVICE_DFP5_SUPPORT 0x0800
ATOM_DEVICE_LCD1_SUPPORT 0x0002
ulOtherDisplayMisc: bit[0]=0: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is not supported by SBIOS.
=1: INT15 callback function Get LCD EDID ( ax=4e08, bl=1b ) is supported by SBIOS.
bit[1]=0: INT15 callback function Get boot display( ax=4e08, bl=01h) is not supported by SBIOS
=1: INT15 callback function Get boot display( ax=4e08, bl=01h) is supported by SBIOS
bit[2]=0: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is not supported by SBIOS
=1: INT15 callback function Get panel Expansion ( ax=4e08, bl=02h) is supported by SBIOS
bit[3]=0: VBIOS fast boot is disabled
=1: VBIOS fast boot is enabled. ( VBIOS skips display device detection in every set mode if the LCD panel is connected and the lid is open)
ulGPUCapInfo: bit[0]=0: TMDS/HDMI Coherent Mode use cascade PLL mode.
=1: TMDS/HDMI Coherent Mode use single PLL mode.
bit[1]=0: DP mode use cascade PLL mode ( New for Trinity )
=1: DP mode use single PLL mode
bit[3]=0: Enable AUX HW mode detection logic
=1: Disable AUX HW mode detection logic
ulSB_MMIO_Base_Addr: Physical Base address to SB MMIO space. Driver needs to initialize it for SMU usage.
 
usRequestedPWMFreqInHz: When it's set to 0x0 by SBIOS: the LCD BackLight is not controlled by GPU(SW).
Any attempt to change BL using VBIOS function or enable VariBri from PP table is not effective since ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==0;
When it's set to a non-zero frequency, the BackLight is controlled by GPU (SW) in one of two ways below:
1. SW uses the GPU BL PWM output to control the BL, in this case, this non-zero frequency determines what freq GPU should use;
VBIOS will set up proper PWM frequency and ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1,as the result,
Changing BL using VBIOS function is functional in both driver and non-driver present environment;
and enabling VariBri under the driver environment from PP table is optional.
 
2. SW uses other means to control BL (like DPCD),this non-zero frequency serves as a flag only indicating
that BL control from GPU is expected.
VBIOS will NOT set up PWM frequency but make ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU==1
Changing BL using VBIOS function could be functional in both driver and non-driver present environment,but
it's per platform
and enabling VariBri under the driver environment from PP table is optional.
 
ucHtcTmpLmt: Refer to D18F3x64 bit[22:16], HtcTmpLmt.
Threshold on value to enter HTC_active state.
ucHtcHystLmt: Refer to D18F3x64 bit[27:24], HtcHystLmt.
To calculate threshold off value to exit HTC_active state, which is Threshold on value minus ucHtcHystLmt.
ulMinEngineClock: Minimum SCLK allowed in 10kHz unit. This is calculated based on WRCK Fuse settings.
ulSystemConfig: Bit[0]=0: PCIE Power Gating Disabled
=1: PCIE Power Gating Enabled
Bit[1]=0: DDR-DLL shut-down feature disabled.
1: DDR-DLL shut-down feature enabled.
Bit[2]=0: DDR-PLL Power down feature disabled.
1: DDR-PLL Power down feature enabled.
ulCPUCapInfo: TBD
usNBP0Voltage: VID for voltage on NB P0 State
usNBP1Voltage: VID for voltage on NB P1 State
usNBP2Voltage: VID for voltage on NB P2 State
usNBP3Voltage: VID for voltage on NB P3 State
usBootUpNBVoltage: Voltage Index of GNB voltage configured by SBIOS, which is sufficient to support VBIOS DISPCLK requirement.
usExtDispConnInfoOffset: Offset to sExtDispConnInfo inside the structure
usPanelRefreshRateRange: Bit vector for LCD supported refresh rate range. If DRR is requested by the platform, at least two bits need to be set
to indicate a range.
SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
ucMemoryType: [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
ucUMAChannelNumber: System memory channel numbers.
ulCSR_M3_ARB_CNTL_DEFAULT[10]: Arrays with values for CSR M3 arbiter for default
ulCSR_M3_ARB_CNTL_UVD[10]: Arrays with values for CSR M3 arbiter for UVD playback.
ulCSR_M3_ARB_CNTL_FS3D[10]: Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
sAvail_SCLK[5]: Arrays to provide available list of SCLK and corresponding voltage, order from low to high
ulGMCRestoreResetTime: GMC power restore and GMC reset time to calculate data reconnection latency. Unit in ns.
ulMinimumNClk: Minimum NCLK speed among all NB-Pstates to calculate data reconnection latency. Unit in 10kHz.
ulIdleNClk: NCLK speed while memory runs in self-refresh state. Unit in 10kHz.
ulDDR_DLL_PowerUpTime: DDR PHY DLL power up time. Unit in ns.
ulDDR_PLL_PowerUpTime: DDR PHY PLL power up time. Unit in ns.
usPCIEClkSSPercentage: PCIE Clock Spread Spectrum Percentage in unit 0.01%; 100 mean 1%.
usPCIEClkSSType: PCIE Clock Spread Spectrum Type. 0 for Down spread(default); 1 for Center spread.
usLvdsSSPercentage: LVDS panel ( not include eDP ) Spread Spectrum Percentage in unit of 0.01%, =0, use VBIOS default setting.
4398,6 → 4937,41
usHDMISSpreadRateIn10Hz: HDMI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usDVISSPercentage: DVI Spread Spectrum Percentage in unit 0.01%; 100 mean 1%, =0, use VBIOS default setting.
usDVISSpreadRateIn10Hz: DVI Spread Spectrum frequency in unit of 10Hz, =0, use VBIOS default setting.
usMaxLVDSPclkFreqInSingleLink: Max pixel clock of LVDS panel in single link, if=0 means VBIOS uses the default threshold, right now it is 85 MHz
ucLVDSMisc: [bit0] LVDS 888bit panel mode =0: LVDS 888 panel in LDI mode, =1: LVDS 888 panel in FPDI mode
[bit1] LVDS panel lower and upper link mapping =0: lower link and upper link not swapped, =1: lower link and upper link are swapped
[bit2] LVDS 888bit per color mode =0: 666 bit per color =1: 888 bit per color
[bit3] LVDS parameter override enable =0: ucLvdsMisc parameters are not used =1: ucLvdsMisc parameters should be used
[bit4] Polarity of signal sent to digital BLON output pin. =0: not inverted(active high) =1: inverted ( active low )
ucLVDSPwrOnSeqDIGONtoDE_in4Ms: LVDS power up sequence time in unit of 4ms, time delay from DIGON signal active to data enable signal active( DE ).
=0 means use VBIOS default which is 8 ( 32ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
ucLVDSPwrOnDEtoVARY_BL_in4Ms: LVDS power up sequence time in unit of 4ms., time delay from DE( data enable ) active to Vary Brightness enable signal active( VARY_BL ).
=0 means use VBIOS default which is 90 ( 360ms ). The LVDS power up sequence is as follows: DIGON->DE->VARY_BL->BLON.
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucLVDSPwrOffVARY_BLtoDE_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from data enable ( DE ) signal off to LCDVCC (DIGON) off.
=0 means use VBIOS default delay which is 8 ( 32ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucLVDSPwrOffDEtoDIGON_in4Ms: LVDS power down sequence time in unit of 4ms, time delay from vary brightness enable signal( VARY_BL) off to data enable ( DE ) signal off.
=0 means use VBIOS default which is 90 ( 360ms ). The LVDS power down sequence is as follows: BLON->VARY_BL->DE->DIGON
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucLVDSOffToOnDelay_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from DIGON signal off to DIGON signal active.
=0 means to use VBIOS default delay which is 125 ( 500ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucLVDSPwrOnVARY_BLtoBLON_in4Ms: LVDS power up sequence time in unit of 4ms. Time delay from VARY_BL signal on to BLON signal active.
=0 means to use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ucLVDSPwrOffBLONtoVARY_BL_in4Ms: LVDS power down sequence time in unit of 4ms. Time delay from BLON signal off to VARY_BL signal off.
=0 means to use VBIOS default delay which is 0 ( 0ms ).
This parameter is used by VBIOS only. VBIOS will patch LVDS_InfoTable.
 
ulNbpStateMemclkFreq[4]: system memory clock frequency in unit of 10Khz in different NB pstate.
 
**********************************************************************************************************************/
 
/**************************************************************************/
4459,6 → 5033,7
#define ASIC_INTERNAL_SS_ON_DP 7
#define ASIC_INTERNAL_SS_ON_DCPLL 8
#define ASIC_EXTERNAL_SS_ON_DP_CLOCK 9
#define ASIC_INTERNAL_VCE_SS 10
 
typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
{
4520,8 → 5095,8
#define ATOM_DOS_MODE_INFO_DEF 7
#define ATOM_I2C_CHANNEL_STATUS_DEF 8
#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
#define ATOM_INTERNAL_TIMER_DEF 10
 
 
// BIOS_0_SCRATCH Definition
#define ATOM_S0_CRT1_MONO 0x00000001L
#define ATOM_S0_CRT1_COLOR 0x00000002L
4648,6 → 5223,7
#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGEb3 0x10
#define ATOM_S2_TMDS_COHERENT_MODEb3 0x10 // used by VBIOS code only, use coherent mode for TMDS/HDMI mode
#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
 
5038,6 → 5614,23
USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
 
typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4
{
USHORT usHight; // Image Height
USHORT usWidth; // Image Width
USHORT usGraphPitch;
UCHAR ucColorDepth;
UCHAR ucPixelFormat;
UCHAR ucSurface; // Surface 1 or 2
UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
UCHAR ucModeType;
UCHAR ucReserved;
}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_4;
 
// ucEnable
#define ATOM_GRAPH_CONTROL_SET_PITCH 0x0f
#define ATOM_GRAPH_CONTROL_SET_DISP_START 0x10
 
typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
{
ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
5057,6 → 5650,58
USHORT usY_Size;
}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
 
typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2
{
union{
USHORT usX_Size; //When used as an input parameter, usX_Size indicates which CRTC
USHORT usSurface;
};
USHORT usY_Size;
USHORT usDispXStart;
USHORT usDispYStart;
}GET_DISPLAY_SURFACE_SIZE_PARAMETERS_V2;
 
 
typedef struct _PALETTE_DATA_CONTROL_PARAMETERS_V3
{
UCHAR ucLutId;
UCHAR ucAction;
USHORT usLutStartIndex;
USHORT usLutLength;
USHORT usLutOffsetInVram;
}PALETTE_DATA_CONTROL_PARAMETERS_V3;
 
// ucAction:
#define PALETTE_DATA_AUTO_FILL 1
#define PALETTE_DATA_READ 2
#define PALETTE_DATA_WRITE 3
 
 
typedef struct _INTERRUPT_SERVICE_PARAMETERS_V2
{
UCHAR ucInterruptId;
UCHAR ucServiceId;
UCHAR ucStatus;
UCHAR ucReserved;
}INTERRUPT_SERVICE_PARAMETER_V2;
 
// ucInterruptId
#define HDP1_INTERRUPT_ID 1
#define HDP2_INTERRUPT_ID 2
#define HDP3_INTERRUPT_ID 3
#define HDP4_INTERRUPT_ID 4
#define HDP5_INTERRUPT_ID 5
#define HDP6_INTERRUPT_ID 6
#define SW_INTERRUPT_ID 11
 
// ucAction
#define INTERRUPT_SERVICE_GEN_SW_INT 1
#define INTERRUPT_SERVICE_GET_STATUS 2
 
// ucStatus
#define INTERRUPT_STATUS__INT_TRIGGER 1
#define INTERRUPT_STATUS__HPD_HIGH 2
 
typedef struct _INDIRECT_IO_ACCESS
{
ATOM_COMMON_TABLE_HEADER sHeader;
5189,7 → 5834,7
 
#define END_OF_REG_INDEX_BLOCK 0x0ffff
#define END_OF_REG_DATA_BLOCK 0x00000000
#define ATOM_INIT_REG_MASK_FLAG 0x80
#define ATOM_INIT_REG_MASK_FLAG 0x80 //Not used in BIOS
#define CLOCK_RANGE_HIGHEST 0x00ffffff
 
#define VALUE_DWORD SIZEOF ULONG
5229,6 → 5874,7
#define _128Mx8 0x51
#define _128Mx16 0x52
#define _256Mx8 0x61
#define _256Mx16 0x62
 
#define SAMSUNG 0x1
#define INFINEON 0x2
5585,7 → 6231,7
ULONG ulChannelMapCfg; // mmMC_SHARED_CHREMAP
USHORT usModuleSize; // Size of ATOM_VRAM_MODULE_V7
USHORT usPrivateReserved; // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
USHORT usReserved;
USHORT usEnableChannels; // bit vector which indicates which channels are enabled
UCHAR ucExtMemoryID; // Current memory module ID
UCHAR ucMemoryType; // MEM_TYPE_DDR2/DDR3/GDDR3/GDDR5
UCHAR ucChannelNum; // Number of mem. channels supported in this module
5597,7 → 6243,8
UCHAR ucNPL_RT; // Round trip delay (MC_SEQ_CAS_TIMING [28:24]:TCL=CL+NPL_RT-2). Always 2.
UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
UCHAR ucMemorySize; // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
UCHAR ucReserved[3];
USHORT usSEQSettingOffset;
UCHAR ucReserved;
// Memory Module specific values
USHORT usEMRS2Value; // EMRS2/MR2 Value.
USHORT usEMRS3Value; // EMRS3/MR3 Value.
5650,7 → 6297,8
ATOM_COMMON_TABLE_HEADER sHeader;
USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
USHORT usReserved[4];
USHORT usPerBytePresetOffset; // offset of ATOM_INIT_REG_BLOCK structure for Per Byte Offset Preset Settings
USHORT usReserved[3];
UCHAR ucNumOfVRAMModule; // indicate number of VRAM module
UCHAR ucMemoryClkPatchTblVer; // version of memory AC timing register list
UCHAR ucVramModuleVer; // indicates ATOM_VRAM_MODULE version
5935,6 → 6583,52
ASIC_ENCODER_INFO asEncoderInfo[1];
}ATOM_DISP_OUT_INFO_V2;
 
 
typedef struct _ATOM_DISP_CLOCK_ID {
UCHAR ucPpllId;
UCHAR ucPpllAttribute;
}ATOM_DISP_CLOCK_ID;
 
// ucPpllAttribute
#define CLOCK_SOURCE_SHAREABLE 0x01
#define CLOCK_SOURCE_DP_MODE 0x02
#define CLOCK_SOURCE_NONE_DP_MODE 0x04
 
//DispOutInfoTable
typedef struct _ASIC_TRANSMITTER_INFO_V2
{
USHORT usTransmitterObjId;
USHORT usDispClkIdOffset; // points to the clock source id list supported by the Encoder Object
UCHAR ucTransmitterCmdTblId;
UCHAR ucConfig;
UCHAR ucEncoderID; // available 1st encoder ( default )
UCHAR ucOptionEncoderID; // available 2nd encoder ( optional )
UCHAR uc2ndEncoderID;
UCHAR ucReserved;
}ASIC_TRANSMITTER_INFO_V2;
 
typedef struct _ATOM_DISP_OUT_INFO_V3
{
ATOM_COMMON_TABLE_HEADER sHeader;
USHORT ptrTransmitterInfo;
USHORT ptrEncoderInfo;
USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
USHORT usReserved;
UCHAR ucDCERevision;
UCHAR ucMaxDispEngineNum;
UCHAR ucMaxActiveDispEngineNum;
UCHAR ucMaxPPLLNum;
UCHAR ucCoreRefClkSource; // value of CORE_REF_CLK_SOURCE
UCHAR ucReserved[3];
ASIC_TRANSMITTER_INFO_V2 asTransmitterInfo[1]; // for alignment only
}ATOM_DISP_OUT_INFO_V3;
 
typedef enum CORE_REF_CLK_SOURCE{
CLOCK_SRC_XTALIN=0,
CLOCK_SRC_XO_IN=1,
CLOCK_SRC_XO_IN2=2,
}CORE_REF_CLK_SOURCE;
 
// DispDevicePriorityInfo
typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
{
6070,6 → 6764,39
#define HW_I2C_READ 0
#define I2C_2BYTE_ADDR 0x02
 
/****************************************************************************/
// Structures used by HW_Misc_OperationTable
/****************************************************************************/
typedef struct _ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1
{
UCHAR ucCmd; // Input: To tell which action to take
UCHAR ucReserved[3];
ULONG ulReserved;
}ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1;
 
typedef struct _ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1
{
UCHAR ucReturnCode; // Output: Return value based on the action taken
UCHAR ucReserved[3];
ULONG ulReserved;
}ATOM_HW_MISC_OPERATION_OUTPUT_PARAMETER_V1_1;
 
// Actions code
#define ATOM_GET_SDI_SUPPORT 0xF0
 
// Return code
#define ATOM_UNKNOWN_CMD 0
#define ATOM_FEATURE_NOT_SUPPORTED 1
#define ATOM_FEATURE_SUPPORTED 2
 
typedef struct _ATOM_HW_MISC_OPERATION_PS_ALLOCATION
{
ATOM_HW_MISC_OPERATION_INPUT_PARAMETER_V1_1 sInput_Output;
PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS sReserved;
}ATOM_HW_MISC_OPERATION_PS_ALLOCATION;
 
/****************************************************************************/
 
typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
{
UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
6090,6 → 6817,52
#define SELECT_CRTC_PIXEL_RATE 7
#define SELECT_VGA_BLK 8
 
// DIGTransmitterInfoTable structure used to program UNIPHY settings
typedef struct _DIG_TRANSMITTER_INFO_HEADER_V3_1{
ATOM_COMMON_TABLE_HEADER sHeader;
USHORT usDPVsPreEmphSettingOffset; // offset of PHY_ANALOG_SETTING_INFO * with DP Voltage Swing and Pre-Emphasis for each Link clock
USHORT usPhyAnalogRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with non-DP mode analog setting register info
USHORT usPhyAnalogSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with non-DP mode analog setting for each link clock range
USHORT usPhyPllRegListOffset; // offset of CLOCK_CONDITION_REGESTER_INFO* with Phy Pll register Info
USHORT usPhyPllSettingOffset; // offset of CLOCK_CONDITION_SETTING_ENTRY* with Phy Pll Settings
}DIG_TRANSMITTER_INFO_HEADER_V3_1;
 
typedef struct _CLOCK_CONDITION_REGESTER_INFO{
USHORT usRegisterIndex;
UCHAR ucStartBit;
UCHAR ucEndBit;
}CLOCK_CONDITION_REGESTER_INFO;
 
typedef struct _CLOCK_CONDITION_SETTING_ENTRY{
USHORT usMaxClockFreq;
UCHAR ucEncodeMode;
UCHAR ucPhySel;
ULONG ulAnalogSetting[1];
}CLOCK_CONDITION_SETTING_ENTRY;
 
typedef struct _CLOCK_CONDITION_SETTING_INFO{
USHORT usEntrySize;
CLOCK_CONDITION_SETTING_ENTRY asClkCondSettingEntry[1];
}CLOCK_CONDITION_SETTING_INFO;
 
typedef struct _PHY_CONDITION_REG_VAL{
ULONG ulCondition;
ULONG ulRegVal;
}PHY_CONDITION_REG_VAL;
 
typedef struct _PHY_CONDITION_REG_INFO{
USHORT usRegIndex;
USHORT usSize;
PHY_CONDITION_REG_VAL asRegVal[1];
}PHY_CONDITION_REG_INFO;
 
typedef struct _PHY_ANALOG_SETTING_INFO{
UCHAR ucEncodeMode;
UCHAR ucPhySel;
USHORT usSize;
PHY_CONDITION_REG_INFO asAnalogSetting[1];
}PHY_ANALOG_SETTING_INFO;
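// Editorial note (not in the original header): CLOCK_CONDITION_SETTING_ENTRY,
// PHY_CONDITION_REG_INFO and PHY_ANALOG_SETTING_INFO are variable-sized
// records; the [1] arrays are C89-style flexible members. Assuming the usSize
// fields give each record's total length in bytes, a table walker advances
// with byte arithmetic rather than array indexing, e.g.:
//
//   PHY_CONDITION_REG_INFO *info = first_record;
//   while (remaining > 0) {
//       /* consume info->asRegVal[] here */
//       remaining -= le16_to_cpu(info->usSize);
//       info = (PHY_CONDITION_REG_INFO *)((UCHAR *)info +
//                                         le16_to_cpu(info->usSize));
//   }
//
// This is a sketch of the assumed layout, not code taken from a driver.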
 
/****************************************************************************/
//Portion VI: Definitions for VBIOS MC scratch registers used by the driver
/****************************************************************************/
6497,6 → 7270,8
#define ATOM_PP_THERMALCONTROLLER_EMC2103 13 /* 0x0D */ // Only fan control will be implemented, do NOT show this in PPGen.
#define ATOM_PP_THERMALCONTROLLER_SUMO 14 /* 0x0E */ // Sumo type, used internally
#define ATOM_PP_THERMALCONTROLLER_NISLANDS 15
#define ATOM_PP_THERMALCONTROLLER_SISLANDS 16
#define ATOM_PP_THERMALCONTROLLER_LM96163 17
 
// Thermal controller 'combo type' to use an external controller for Fan control and an internal controller for thermal.
// We probably should reserve the bit 0x80 for this use.
6512,6 → 7287,7
UCHAR ucClockStateIndices[1]; // variable-sized
} ATOM_PPLIB_STATE;
 
 
typedef struct _ATOM_PPLIB_FANTABLE
{
UCHAR ucFanTableFormat; // Change this if the table format changes or version changes so that the other fields are not the same.
6524,6 → 7300,12
USHORT usPWMHigh; // The PWM value at THigh.
} ATOM_PPLIB_FANTABLE;
 
typedef struct _ATOM_PPLIB_FANTABLE2
{
ATOM_PPLIB_FANTABLE basicTable;
USHORT usTMax; // The max temperature
} ATOM_PPLIB_FANTABLE2;
 
typedef struct _ATOM_PPLIB_EXTENDEDHEADER
{
USHORT usSize;
6530,6 → 7312,8
ULONG ulMaxEngineClock; // For Overdrive.
ULONG ulMaxMemoryClock; // For Overdrive.
// Add extra system parameters here, always adjust size to include all fields.
USHORT usVCETableOffset; //points to ATOM_PPLIB_VCE_Table
USHORT usUVDTableOffset; //points to ATOM_PPLIB_UVD_Table
} ATOM_PPLIB_EXTENDEDHEADER;
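// Editorial note (not in the original header): usVCETableOffset and
// usUVDTableOffset are byte offsets from the start of the PowerPlay table,
// with 0 conventionally meaning "table absent". Because fields are only ever
// appended, a parser checks usSize to decide which offsets exist before
// reading them; a minimal sketch (the size constant is illustrative):
//
//   if (le16_to_cpu(ext_hdr->usSize) >= MIN_SIZE_WITH_VCE_OFFSET &&
//       le16_to_cpu(ext_hdr->usVCETableOffset))
//       vce_table = (ATOM_PPLIB_VCE_Table *)
//           (table_base + le16_to_cpu(ext_hdr->usVCETableOffset));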
 
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
6552,6 → 7336,7
#define ATOM_PP_PLATFORM_CAP_REGULATOR_HOT 0x00010000 // Enable the 'regulator hot' feature.
#define ATOM_PP_PLATFORM_CAP_BACO 0x00020000 // Does the driver supports BACO state.
 
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
{
ATOM_COMMON_TABLE_HEADER sHeader;
6610,7 → 7395,8
USHORT usVddciDependencyOnMCLKOffset;
USHORT usVddcDependencyOnMCLKOffset;
USHORT usMaxClockVoltageOnDCOffset;
USHORT usReserved[2];
USHORT usVddcPhaseShedLimitsTableOffset; // Points to ATOM_PPLIB_PhaseSheddingLimits_Table
USHORT usReserved;
} ATOM_PPLIB_POWERPLAYTABLE4, *LPATOM_PPLIB_POWERPLAYTABLE4;
 
typedef struct _ATOM_PPLIB_POWERPLAYTABLE5
6620,8 → 7406,9
ULONG ulNearTDPLimit;
ULONG ulSQRampingThreshold;
USHORT usCACLeakageTableOffset; // Points to ATOM_PPLIB_CAC_Leakage_Table
ULONG ulCACLeakage; // TBD, this parameter is still under discussion. Change to ulReserved if not needed.
ULONG ulReserved;
ULONG ulCACLeakage; // The iLeakage for driver calculated CAC leakage table
USHORT usTDPODLimit;
USHORT usLoadLineSlope; // in milliOhms * 100
} ATOM_PPLIB_POWERPLAYTABLE5, *LPATOM_PPLIB_POWERPLAYTABLE5;
 
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
6650,6 → 7437,7
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification2
#define ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2 0x0001
#define ATOM_PPLIB_CLASSIFICATION2_ULV 0x0002
#define ATOM_PPLIB_CLASSIFICATION2_MVC 0x0004 //Multi-View Codec (BD-3D)
 
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
6673,7 → 7461,9
 
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
 
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
 
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
 
//memory related flags
6754,6 → 7544,24
 
} ATOM_PPLIB_EVERGREEN_CLOCK_INFO;
 
typedef struct _ATOM_PPLIB_SI_CLOCK_INFO
{
USHORT usEngineClockLow;
UCHAR ucEngineClockHigh;
 
USHORT usMemoryClockLow;
UCHAR ucMemoryClockHigh;
 
USHORT usVDDC;
USHORT usVDDCI;
UCHAR ucPCIEGen;
UCHAR ucUnused1;
 
ULONG ulFlags; // ATOM_PPLIB_SI_FLAGS_*, no flag is necessary for now
 
} ATOM_PPLIB_SI_CLOCK_INFO;
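// Editorial note (not in the original header): as in the other PPLIB clock
// info variants, each 24-bit clock is split into a 16-bit low word and an
// 8-bit high byte, in 10 kHz units. A minimal reassembly sketch:
//
//   u32 sclk = le16_to_cpu(si->usEngineClockLow) |
//              ((u32)si->ucEngineClockHigh << 16);
//   u32 mclk = le16_to_cpu(si->usMemoryClockLow) |
//              ((u32)si->ucMemoryClockHigh << 16);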
 
 
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
 
{
6766,7 → 7574,7
UCHAR ucPadding; // For proper alignment and size.
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requirement.
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width may be larger to meet the display BW requirement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
6788,10 → 7596,8
USHORT usEngineClockLow; //clockfrequency & 0xFFFF. The unit is in 10khz
UCHAR ucEngineClockHigh; //clockfrequency >> 16.
UCHAR vddcIndex; //2-bit vddc index;
UCHAR leakage; //please use 8-bit absolute value, not the 6-bit % value
USHORT tdpLimit;
//please initialize to 0
UCHAR rsv;
//please initialize to 0
USHORT rsv1;
//please initialize to 0s
ULONG rsv2[2];
6813,7 → 7619,7
UCHAR clockInfoIndex[1];
} ATOM_PPLIB_STATE_V2;
 
typedef struct StateArray{
typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
6821,18 → 7627,17
}StateArray;
 
 
typedef struct ClockInfoArray{
typedef struct _ClockInfoArray{
//how many clock levels we have
UCHAR ucNumEntries;
//sizeof(ATOM_PPLIB_SUMO_CLOCK_INFO)
//sizeof(ATOM_PPLIB_CLOCK_INFO)
UCHAR ucEntrySize;
//this is for Sumo
ATOM_PPLIB_SUMO_CLOCK_INFO clockInfo[1];
UCHAR clockInfo[1];
}ClockInfoArray;
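// Editorial note (not in the original header): clockInfo[] is now an opaque
// byte array because the entry type (Sumo, SI, evergreen, ...) varies by
// ASIC; entries must be addressed with ucEntrySize instead of C indexing.
// A sketch of the assumed access pattern, with the cast target standing in
// for whichever per-ASIC clock-info struct applies:
//
//   ATOM_PPLIB_SI_CLOCK_INFO *ci = (ATOM_PPLIB_SI_CLOCK_INFO *)
//       &clock_info_array->clockInfo[i * clock_info_array->ucEntrySize];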
 
typedef struct NonClockInfoArray{
typedef struct _NonClockInfoArray{
 
//how many non-clock levels we have. normally should be the same as the number of states
UCHAR ucNumEntries;
6871,6 → 7676,124
ATOM_PPLIB_Clock_Voltage_Limit_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_CAC_Leakage_Record
{
USHORT usVddc; // We use this field for the "fake" standardized VDDC for power calculations
ULONG ulLeakageValue;
}ATOM_PPLIB_CAC_Leakage_Record;
 
typedef struct _ATOM_PPLIB_CAC_Leakage_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_CAC_Leakage_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_CAC_Leakage_Table;
 
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Record
{
USHORT usVoltage;
USHORT usSclkLow;
UCHAR ucSclkHigh;
USHORT usMclkLow;
UCHAR ucMclkHigh;
}ATOM_PPLIB_PhaseSheddingLimits_Record;
 
typedef struct _ATOM_PPLIB_PhaseSheddingLimits_Table
{
UCHAR ucNumEntries; // Number of entries.
ATOM_PPLIB_PhaseSheddingLimits_Record entries[1]; // Dynamically allocate entries.
}ATOM_PPLIB_PhaseSheddingLimits_Table;
 
typedef struct _VCEClockInfo{
USHORT usEVClkLow;
UCHAR ucEVClkHigh;
USHORT usECClkLow;
UCHAR ucECClkHigh;
}VCEClockInfo;
 
typedef struct _VCEClockInfoArray{
UCHAR ucNumEntries;
VCEClockInfo entries[1];
}VCEClockInfoArray;
 
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record
{
USHORT usVoltage;
UCHAR ucVCEClockInfoIndex;
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record;
 
typedef struct _ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table
{
UCHAR numEntries;
ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record entries[1];
}ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_VCE_State_Record
{
UCHAR ucVCEClockInfoIndex;
UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
}ATOM_PPLIB_VCE_State_Record;
 
typedef struct _ATOM_PPLIB_VCE_State_Table
{
UCHAR numEntries;
ATOM_PPLIB_VCE_State_Record entries[1];
}ATOM_PPLIB_VCE_State_Table;
 
 
typedef struct _ATOM_PPLIB_VCE_Table
{
UCHAR revid;
// VCEClockInfoArray array;
// ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table limits;
// ATOM_PPLIB_VCE_State_Table states;
}ATOM_PPLIB_VCE_Table;
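// Editorial note (not in the original header): the commented-out members
// document layout, not real C fields. The three variable-sized blocks follow
// revid contiguously in the BIOS image and each must be walked using its own
// ucNumEntries, so they cannot be fixed struct members. A sketch of the
// assumed walk, starting at the byte after revid:
//
//   VCEClockInfoArray *array = (VCEClockInfoArray *)((UCHAR *)table + 1);
//   ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
//       (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
//       ((UCHAR *)&array->entries[0] +
//        array->ucNumEntries * sizeof(VCEClockInfo));
//
// The same pattern applies to ATOM_PPLIB_UVD_Table below.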
 
 
typedef struct _UVDClockInfo{
USHORT usVClkLow;
UCHAR ucVClkHigh;
USHORT usDClkLow;
UCHAR ucDClkHigh;
}UVDClockInfo;
 
typedef struct _UVDClockInfoArray{
UCHAR ucNumEntries;
UVDClockInfo entries[1];
}UVDClockInfoArray;
 
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record
{
USHORT usVoltage;
UCHAR ucUVDClockInfoIndex;
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record;
 
typedef struct _ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table
{
UCHAR numEntries;
ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record entries[1];
}ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table;
 
typedef struct _ATOM_PPLIB_UVD_State_Record
{
UCHAR ucUVDClockInfoIndex;
UCHAR ucClockInfoIndex; //highest 2 bits indicate memory p-states, lower 6 bits index into ClockInfoArray
}ATOM_PPLIB_UVD_State_Record;
 
typedef struct _ATOM_PPLIB_UVD_State_Table
{
UCHAR numEntries;
ATOM_PPLIB_UVD_State_Record entries[1];
}ATOM_PPLIB_UVD_State_Table;
 
 
typedef struct _ATOM_PPLIB_UVD_Table
{
UCHAR revid;
// UVDClockInfoArray array;
// ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table limits;
// ATOM_PPLIB_UVD_State_Table states;
}ATOM_PPLIB_UVD_Table;
 
/**************************************************************************/
 
 
7020,4 → 7943,68
 
#pragma pack() // BIOS data must use byte alignment
 
//
// AMD ACPI Table
//
#pragma pack(1)
 
typedef struct {
ULONG Signature;
ULONG TableLength; //Length
UCHAR Revision;
UCHAR Checksum;
UCHAR OemId[6];
UCHAR OemTableId[8]; //UINT64 OemTableId;
ULONG OemRevision;
ULONG CreatorId;
ULONG CreatorRevision;
} AMD_ACPI_DESCRIPTION_HEADER;
/*
//EFI_ACPI_DESCRIPTION_HEADER from AcpiCommon.h
typedef struct {
UINT32 Signature; //0x0
UINT32 Length; //0x4
UINT8 Revision; //0x8
UINT8 Checksum; //0x9
UINT8 OemId[6]; //0xA
UINT64 OemTableId; //0x10
UINT32 OemRevision; //0x18
UINT32 CreatorId; //0x1C
UINT32 CreatorRevision; //0x20
}EFI_ACPI_DESCRIPTION_HEADER;
*/
typedef struct {
AMD_ACPI_DESCRIPTION_HEADER SHeader;
UCHAR TableUUID[16]; //0x24
ULONG VBIOSImageOffset; //0x34. Offset to the first GOP_VBIOS_CONTENT block from the beginning of the structure.
ULONG Lib1ImageOffset; //0x38. Offset to the first GOP_LIB1_CONTENT block from the beginning of the structure.
ULONG Reserved[4]; //0x3C
}UEFI_ACPI_VFCT;
 
typedef struct {
ULONG PCIBus; //0x4C
ULONG PCIDevice; //0x50
ULONG PCIFunction; //0x54
USHORT VendorID; //0x58
USHORT DeviceID; //0x5A
USHORT SSVID; //0x5C
USHORT SSID; //0x5E
ULONG Revision; //0x60
ULONG ImageLength; //0x64
}VFCT_IMAGE_HEADER;
 
 
typedef struct {
VFCT_IMAGE_HEADER VbiosHeader;
UCHAR VbiosContent[1];
}GOP_VBIOS_CONTENT;
 
typedef struct {
VFCT_IMAGE_HEADER Lib1Header;
UCHAR Lib1Content[1];
}GOP_LIB1_CONTENT;
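// Editorial sketch (not part of the original header): a consumer finds its
// VBIOS image by jumping to VBIOSImageOffset and matching the image header
// against the device; 'vfct' and 'pdev' are assumed, validated inputs.
//
//   GOP_VBIOS_CONTENT *vbios = (GOP_VBIOS_CONTENT *)
//       ((UCHAR *)vfct + vfct->VBIOSImageOffset);
//   VFCT_IMAGE_HEADER *hdr = &vbios->VbiosHeader;
//   if (hdr->PCIBus == pdev->bus->number &&
//       hdr->VendorID == pdev->vendor && hdr->DeviceID == pdev->device)
//       bios = vbios->VbiosContent; /* hdr->ImageLength bytes */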
 
#pragma pack()
 
 
#endif /* _ATOMBIOS_H */
/drivers/video/drm/radeon/atombios_crtc.c
83,26 → 83,20
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
ENABLE_SCALER_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
 
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
/* fixme - fill in enc_priv for atom dac */
enum radeon_tv_std tv_std = TV_STD_NTSC;
bool is_tv = false, is_cv = false;
struct drm_encoder *encoder;
 
if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
return;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
/* find tv std */
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
tv_std = tv_dac->tv_std;
is_tv = true;
}
}
}
 
memset(&args, 0, sizeof(args));
 
231,6 → 225,22
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
 
memset(&args, 0, sizeof(args));
 
args.ucDispPipeId = radeon_crtc->crtc_id;
args.ucEnable = state;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
242,8 → 252,10
radeon_crtc->enabled = true;
/* adjust pm to dpms changes BEFORE enabling crtcs */
radeon_pm_compute_clocks(rdev);
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
atombios_blank_crtc(crtc, ATOM_DISABLE);
drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
255,10 → 267,12
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
radeon_crtc->enabled = false;
if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set)
atombios_powergate_crtc(crtc, ATOM_ENABLE);
/* adjust pm to dpms changes AFTER disabling crtcs */
radeon_pm_compute_clocks(rdev);
break;
355,15 → 369,12
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void atombios_disable_ss(struct drm_crtc *crtc)
static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
u32 ss_cntl;
 
if (ASIC_IS_DCE4(rdev)) {
switch (radeon_crtc->pll_id) {
switch (pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
379,7 → 390,7
return;
}
} else if (ASIC_IS_AVIVO(rdev)) {
switch (radeon_crtc->pll_id) {
switch (pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
ss_cntl &= ~1;
406,16 → 417,31
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
 
static void atombios_crtc_program_ss(struct drm_crtc *crtc,
static void atombios_crtc_program_ss(struct radeon_device *rdev,
int enable,
int pll_id,
int crtc_id,
struct radeon_atom_ss *ss)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
unsigned i;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
 
if (!enable) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
i != crtc_id &&
pll_id == rdev->mode_info.crtcs[i]->pll_id) {
/* another crtc is using this pll; don't turn
* off spread spectrum as it might turn off
* the display on an active crtc
*/
return;
}
}
}
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE5(rdev)) {
424,24 → 450,20
switch (pll_id) {
case ATOM_PPLL1:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
}
args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v3.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE61(rdev))
args.v3.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE4(rdev)) {
args.v2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
449,24 → 471,20
switch (pll_id) {
case ATOM_PPLL1:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_PPLL2:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
break;
case ATOM_DCPLL:
args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
break;
case ATOM_PPLL_INVALID:
return;
}
args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
args.v2.ucEnable = enable;
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK))
if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev))
args.v2.ucEnable = ATOM_DISABLE;
} else if (ASIC_IS_DCE3(rdev)) {
args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
479,7 → 497,7
} else if (ASIC_IS_AVIVO(rdev)) {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
atombios_disable_ss(crtc);
atombios_disable_ss(rdev, pll_id);
return;
}
args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
491,7 → 509,7
} else {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
atombios_disable_ss(crtc);
atombios_disable_ss(rdev, pll_id);
return;
}
args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
509,56 → 527,51
};
 
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct radeon_pll *pll,
bool ss_enabled,
struct radeon_atom_ss *ss)
struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
struct drm_connector *connector = NULL;
struct drm_encoder *encoder = radeon_crtc->encoder;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
u32 adjusted_clock = mode->clock;
int encoder_mode = 0;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
int bpc = 8;
int bpc = radeon_get_monitor_bpc(connector);
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
 
/* reset the pll flags */
pll->flags = 0;
radeon_crtc->pll_flags = 0;
 
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
(rdev->family == CHIP_RS740))
pll->flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
radeon_crtc->pll_flags |= (/*RADEON_PLL_USE_FRAC_FB_DIV |*/
RADEON_PLL_PREFER_CLOSEST_LOWER);
 
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
if (rdev->family < CHIP_RV770)
pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
} else {
pll->flags |= RADEON_PLL_LEGACY;
radeon_crtc->pll_flags |= RADEON_PLL_LEGACY;
 
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
else
pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
if (connector)
bpc = connector->display_info.bpc;
encoder_mode = atombios_get_encoder_mode(encoder);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
radeon_encoder_is_dp_bridge(encoder)) {
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
570,12 → 583,12
 
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
if (ss->refdiv) {
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = ss->refdiv;
if (radeon_crtc->ss_enabled) {
if (radeon_crtc->ss.refdiv) {
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv;
if (ASIC_IS_AVIVO(rdev))
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
}
}
}
585,18 → 598,15
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
pll->flags |= RADEON_PLL_IS_LCD;
radeon_crtc->pll_flags |= RADEON_PLL_IS_LCD;
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS)
pll->flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
}
break;
}
}
 
/* DCE3+ has an AdjustDisplayPll that will adjust the pixel clock
* accordingly based on the encoder/transmitter to work around
622,7 → 632,7
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
args.v1.ucEncodeMode = encoder_mode;
if (ss_enabled && ss->percentage)
if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
args.v1.ucConfig |=
ADJUST_DISPLAY_CONFIG_SS_ENABLE;
 
635,47 → 645,32
args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
args.v3.sInput.ucEncodeMode = encoder_mode;
args.v3.sInput.ucDispPllConfig = 0;
if (ss_enabled && ss->percentage)
if (radeon_crtc->ss_enabled && radeon_crtc->ss.percentage)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_SS_ENABLE;
if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT) ||
radeon_encoder_is_dp_bridge(encoder)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
if (ENCODER_MODE_IS_DP(encoder_mode)) {
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else {
if (encoder_mode == ATOM_ENCODER_MODE_HDMI) {
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (encoder_mode == ATOM_ENCODER_MODE_HDMI)
/* deep color support */
args.v3.sInput.usPixelClock =
cpu_to_le16((mode->clock * bpc / 8) / 10);
}
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
if (mode->clock > 165000)
if (is_duallink)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (encoder_mode == ATOM_ENCODER_MODE_DP) {
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
/* 16200 or 27000 */
args.v3.sInput.usPixelClock = cpu_to_le16(dp_clock / 10);
} else if (encoder_mode != ATOM_ENCODER_MODE_LVDS) {
if (mode->clock > 165000)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
}
}
if (radeon_encoder_is_dp_bridge(encoder)) {
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
} else
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
ENCODER_OBJECT_ID_NONE)
args.v3.sInput.ucExtTransmitterID =
radeon_encoder_get_dp_bridge_encoder_id(encoder);
else
args.v3.sInput.ucExtTransmitterID = 0;
 
atom_execute_table(rdev->mode_info.atom_context,
682,14 → 677,14
index, (uint32_t *)&args);
adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
if (args.v3.sOutput.ucRefDiv) {
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_REF_DIV;
pll->reference_div = args.v3.sOutput.ucRefDiv;
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV;
radeon_crtc->pll_reference_div = args.v3.sOutput.ucRefDiv;
}
if (args.v3.sOutput.ucPostDiv) {
pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
pll->flags |= RADEON_PLL_USE_POST_DIV;
pll->post_div = args.v3.sOutput.ucPostDiv;
radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV;
radeon_crtc->pll_flags |= RADEON_PLL_USE_POST_DIV;
radeon_crtc->pll_post_div = args.v3.sOutput.ucPostDiv;
}
break;
default:
717,11 → 712,9
/* on DCE5, make sure the voltage is high enough to support the
* required disp clk.
*/
static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
static void atombios_crtc_set_disp_eng_pll(struct radeon_device *rdev,
u32 dispclk)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
u8 frev, crev;
int index;
union set_pixel_clock args;
749,6 → 742,11
* SetPixelClock provides the dividers
*/
args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
if (ASIC_IS_DCE61(rdev))
args.v6.ucPpll = ATOM_EXT_PLL1;
else if (ASIC_IS_DCE6(rdev))
args.v6.ucPpll = ATOM_PPLL0;
else
args.v6.ucPpll = ATOM_DCPLL;
break;
default:
821,7 → 819,10
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
args.v3.ucPpll = pll_id;
args.v3.ucMiscInfo = (pll_id << 2);
if (crtc_id == ATOM_CRTC2)
args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
else
args.v3.ucMiscInfo = PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1;
if (ss_enabled && (ss->type & ATOM_EXTERNAL_SS_MASK))
args.v3.ucMiscInfo |= PIXEL_CLOCK_MISC_REF_DIV_SRC;
args.v3.ucTransmitterId = encoder_id;
891,103 → 892,84
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
static bool atombios_crtc_prepare_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u32 pll_clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
int encoder_mode = 0;
struct radeon_atom_ss ss;
bool ss_enabled = false;
int bpc = 8;
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
encoder_mode = atombios_get_encoder_mode(encoder);
break;
}
}
radeon_crtc->bpc = 8;
radeon_crtc->ss_enabled = false;
 
if (!radeon_encoder)
return;
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
break;
case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
default:
pll = &rdev->clock.dcpll;
break;
}
 
if (radeon_encoder->active_device &
(ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) {
if ((radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(radeon_crtc->encoder) != ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector =
radeon_get_connector_for_encoder(encoder);
radeon_get_connector_for_encoder(radeon_crtc->encoder);
struct radeon_connector *radeon_connector =
to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
int dp_clock;
bpc = connector->display_info.bpc;
radeon_crtc->bpc = radeon_get_monitor_bpc(connector);
 
switch (encoder_mode) {
case ATOM_ENCODER_MODE_DP_MST:
case ATOM_ENCODER_MODE_DP:
/* DP/eDP */
dp_clock = dig_connector->dp_clock / 10;
if (ASIC_IS_DCE4(rdev))
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
radeon_crtc->ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &radeon_crtc->ss,
ASIC_INTERNAL_SS_ON_DP,
dp_clock);
else {
if (dp_clock == 16200) {
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID2);
if (!ss_enabled)
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
if (!radeon_crtc->ss_enabled)
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
} else
ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev, &ss,
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
ATOM_DP_SS_ID1);
}
break;
case ATOM_ENCODER_MODE_LVDS:
if (ASIC_IS_DCE4(rdev))
ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
radeon_crtc->ss_enabled =
radeon_atombios_get_asic_ss_info(rdev,
&radeon_crtc->ss,
dig->lcd_ss_id,
mode->clock / 10);
else
ss_enabled = radeon_atombios_get_ppll_ss_info(rdev, &ss,
radeon_crtc->ss_enabled =
radeon_atombios_get_ppll_ss_info(rdev,
&radeon_crtc->ss,
dig->lcd_ss_id);
break;
case ATOM_ENCODER_MODE_DVI:
if (ASIC_IS_DCE4(rdev))
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
radeon_crtc->ss_enabled =
radeon_atombios_get_asic_ss_info(rdev,
&radeon_crtc->ss,
ASIC_INTERNAL_SS_ON_TMDS,
mode->clock / 10);
break;
case ATOM_ENCODER_MODE_HDMI:
if (ASIC_IS_DCE4(rdev))
ss_enabled =
radeon_atombios_get_asic_ss_info(rdev, &ss,
radeon_crtc->ss_enabled =
radeon_atombios_get_asic_ss_info(rdev,
&radeon_crtc->ss,
ASIC_INTERNAL_SS_ON_HDMI,
mode->clock / 10);
break;
997,43 → 979,80
}
 
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
radeon_crtc->adjusted_clock = atombios_adjust_pll(crtc, mode);
 
return true;
}
 
static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
u32 pll_clock = mode->clock;
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
break;
case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
break;
case ATOM_DCPLL:
case ATOM_PPLL_INVALID:
default:
pll = &rdev->clock.dcpll;
break;
}
 
/* update pll params */
pll->flags = radeon_crtc->pll_flags;
pll->reference_div = radeon_crtc->pll_reference_div;
pll->post_div = radeon_crtc->pll_post_div;
 
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
/* TV seems to prefer the legacy algo on some boards */
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);
else if (ASIC_IS_AVIVO(rdev))
radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
radeon_compute_pll_avivo(pll, radeon_crtc->adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);
else
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
radeon_compute_pll_legacy(pll, radeon_crtc->adjusted_clock, &pll_clock,
&fb_div, &frac_fb_div, &ref_div, &post_div);
 
atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id,
radeon_crtc->crtc_id, &radeon_crtc->ss);
 
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
ref_div, fb_div, frac_fb_div, post_div, bpc, ss_enabled, &ss);
ref_div, fb_div, frac_fb_div, post_div,
radeon_crtc->bpc, radeon_crtc->ss_enabled, &radeon_crtc->ss);
 
if (ss_enabled) {
if (radeon_crtc->ss_enabled) {
/* calculate ss amount and step size */
if (ASIC_IS_DCE4(rdev)) {
u32 step_size;
u32 amount = (((fb_div * 10) + frac_fb_div) * ss.percentage) / 10000;
ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
u32 amount = (((fb_div * 10) + frac_fb_div) * radeon_crtc->ss.percentage) / 10000;
radeon_crtc->ss.amount = (amount / 10) & ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK;
radeon_crtc->ss.amount |= ((amount - (amount / 10)) << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK;
if (ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
step_size = (4 * amount * ref_div * (ss.rate * 2048)) /
if (radeon_crtc->ss.type & ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD)
step_size = (4 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
else
step_size = (2 * amount * ref_div * (ss.rate * 2048)) /
step_size = (2 * amount * ref_div * (radeon_crtc->ss.rate * 2048)) /
(125 * 25 * pll->reference_freq / 100);
ss.step = step_size;
radeon_crtc->ss.step = step_size;
}
 
atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id,
radeon_crtc->crtc_id, &radeon_crtc->ss);
}
}
 
1050,6 → 1069,7
struct radeon_bo *rbo;
uint64_t fb_location;
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
unsigned bankw, bankh, mtaspect, tile_split;
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
u32 tmp, viewport_w, viewport_h;
int r;
1121,11 → 1141,43
return -EINVAL;
}
 
if (tiling_flags & RADEON_TILING_MACRO)
if (tiling_flags & RADEON_TILING_MACRO) {
if (rdev->family >= CHIP_TAHITI)
tmp = rdev->config.si.tile_config;
else if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
 
switch ((tmp & 0xf0) >> 4) {
case 0: /* 4 banks */
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
break;
case 1: /* 8 banks */
default:
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
break;
case 2: /* 16 banks */
fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
break;
}
 
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
else if (tiling_flags & RADEON_TILING_MICRO)
 
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
fb_format |= EVERGREEN_GRPH_MACRO_TILE_ASPECT(mtaspect);
} else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
 
if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
else if (rdev->family == CHIP_VERDE)
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
 
switch (radeon_crtc->crtc_id) {
case 0:
WREG32(AVIVO_D1VGA_CONTROL, 0);
1167,12 → 1219,12
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
crtc->mode.vdisplay);
target_fb->height);
x &= ~3;
y &= ~1;
WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
1336,12 → 1388,12
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
 
fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
crtc->mode.vdisplay);
target_fb->height);
x &= ~3;
y &= ~1;
WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
1429,54 → 1481,273
}
}
 
/**
* radeon_get_pll_use_mask - look up a mask of which pplls are in use
*
* @crtc: drm crtc
*
* Returns the mask of which PPLLs (Pixel PLLs) are in use.
*/
static u32 radeon_get_pll_use_mask(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 pll_in_use = 0;
 
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
if (crtc == test_crtc)
continue;
 
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
pll_in_use |= (1 << test_radeon_crtc->pll_id);
}
return pll_in_use;
}
 
/**
* radeon_get_shared_dp_ppll - return the PPLL used by another crtc for DP
*
* @crtc: drm crtc
*
* Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
* also in DP mode. For DP, a single PPLL can be used for all DP
* crtcs/encoders.
*/
static int radeon_get_shared_dp_ppll(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
 
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
if (crtc == test_crtc)
continue;
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
/* for DP use the same PLL for all */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
}
}
return ATOM_PPLL_INVALID;
}
 
/**
* radeon_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
*
* @crtc: drm crtc
* @encoder: drm encoder
*
* Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
* be shared (i.e., same clock).
*/
static int radeon_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *test_crtc;
struct radeon_crtc *test_radeon_crtc;
u32 adjusted_clock, test_adjusted_clock;
 
adjusted_clock = radeon_crtc->adjusted_clock;
 
if (adjusted_clock == 0)
return ATOM_PPLL_INVALID;
 
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
if (crtc == test_crtc)
continue;
test_radeon_crtc = to_radeon_crtc(test_crtc);
if (test_radeon_crtc->encoder &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_radeon_crtc->encoder))) {
/* check if we are already driving this connector with another crtc */
if (test_radeon_crtc->connector == radeon_crtc->connector) {
/* if we are, return that pll */
if (test_radeon_crtc->pll_id != ATOM_PPLL_INVALID)
return test_radeon_crtc->pll_id;
}
/* for non-DP check the clock */
test_adjusted_clock = test_radeon_crtc->adjusted_clock;
if ((crtc->mode.clock == test_crtc->mode.clock) &&
(adjusted_clock == test_adjusted_clock) &&
(radeon_crtc->ss_enabled == test_radeon_crtc->ss_enabled) &&
(test_radeon_crtc->pll_id != ATOM_PPLL_INVALID))
return test_radeon_crtc->pll_id;
}
}
return ATOM_PPLL_INVALID;
}
 
/**
* radeon_atom_pick_pll - Allocate a PPLL for use by the crtc.
*
* @crtc: drm crtc
*
* Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors
* a single PPLL can be used for all DP crtcs/encoders. For non-DP
* monitors a dedicated PPLL must be used. If a particular board has
* an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming
* as there is no need to program the PLL itself. If we are not able to
* allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to
* avoid messing up an existing monitor.
*
* Asic specific PLL information
*
* DCE 6.1
* - PPLL2 is only available to UNIPHYA (both DP and non-DP)
* - PPLL0, PPLL1 are available for UNIPHYB/C/D/E/F (both DP and non-DP)
*
* DCE 6.0
* - PPLL0 is available to all UNIPHY (DP only)
* - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
*
* DCE 5.0
* - DCPLL is available to all UNIPHY (DP only)
* - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
*
* DCE 3.0/4.0/4.1
* - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC
*
*/
static int radeon_atom_pick_pll(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *test_encoder;
struct drm_crtc *test_crtc;
uint32_t pll_in_use = 0;
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
u32 pll_in_use;
int pll;
 
if (ASIC_IS_DCE4(rdev)) {
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
if (ASIC_IS_DCE61(rdev)) {
struct radeon_encoder_atom_dig *dig =
radeon_encoder->enc_priv;
 
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY) &&
(dig->linkb == false))
/* UNIPHY A uses PPLL2 */
return ATOM_PPLL2;
else if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
/* UNIPHY B/C/D/E/F */
if (rdev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
return ATOM_PPLL_INVALID;
else {
/* use the same PPLL for all DP monitors */
pll = radeon_get_shared_dp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
} else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
/* UNIPHY B/C/D/E/F */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL0)))
return ATOM_PPLL0;
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
* DCE4: PPLL or ext clock
* DCE5: DCPLL or ext clock
* DCE5: PPLL, DCPLL, or ext clock
* DCE6: PPLL, PPLL0, or ext clock
*
* Setting ATOM_PPLL_INVALID will cause SetPixelClock to skip
* PPLL/DCPLL programming and only program the DP DTO for the
* crtc virtual pixel clock.
*/
if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
if (ASIC_IS_DCE5(rdev) || rdev->clock.dp_extclk)
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
if (rdev->clock.dp_extclk)
/* skip PPLL programming if using ext clock */
return ATOM_PPLL_INVALID;
else if (ASIC_IS_DCE6(rdev))
/* use PPLL0 for all DP */
return ATOM_PPLL0;
else if (ASIC_IS_DCE5(rdev))
/* use DCPLL for all DP */
return ATOM_DCPLL;
else {
/* use the same PPLL for all DP monitors */
pll = radeon_get_shared_dp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
} else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
if (ASIC_IS_AVIVO(rdev)) {
/* in DP mode, the DP ref clock can come from either PPLL
* depending on the asic:
* DCE3: PPLL1 or PPLL2
*/
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
/* use the same PPLL for all DP monitors */
pll = radeon_get_shared_dp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
} else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
return pll;
}
 
/* otherwise, pick one of the plls */
list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_test_crtc;
 
if (crtc == test_crtc)
continue;
 
radeon_test_crtc = to_radeon_crtc(test_crtc);
if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
(radeon_test_crtc->pll_id <= ATOM_PPLL2))
pll_in_use |= (1 << radeon_test_crtc->pll_id);
}
if (!(pll_in_use & 1))
/* all other cases */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL1)))
return ATOM_PPLL1;
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
} else
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
/* on pre-R5xx asics, the crtc to pll mapping is hardcoded */
return radeon_crtc->crtc_id;
}
}
}
 
void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev)
{
/* always set DCPLL */
if (ASIC_IS_DCE6(rdev))
atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
else if (ASIC_IS_DCE4(rdev)) {
struct radeon_atom_ss ss;
bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_DCPLL,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss);
/* XXX: DCE5, make sure voltage, dispclk is high enough */
atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss);
}
 
}
 
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
1485,32 → 1756,14
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder =
to_radeon_encoder(radeon_crtc->encoder);
bool is_tvcv = false;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
/* find tv std */
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->active_device &
(ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
is_tvcv = true;
}
}
 
/* always set DCPLL */
if (ASIC_IS_DCE4(rdev)) {
struct radeon_atom_ss ss;
bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_SS_ON_DCPLL,
rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
/* XXX: DCE5, make sure voltage, dispclk is high enough */
atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
if (ss_enabled)
atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
}
atombios_crtc_set_pll(crtc, adjusted_mode);
 
if (ASIC_IS_DCE4(rdev))
1533,17 → 1786,37
}
 
static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
 
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
 
/* assign the encoder to the radeon crtc to avoid repeated lookups later */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_crtc->encoder = encoder;
radeon_crtc->connector = radeon_get_connector_for_encoder(encoder);
break;
}
}
if ((radeon_crtc->encoder == NULL) || (radeon_crtc->connector == NULL)) {
radeon_crtc->encoder = NULL;
radeon_crtc->connector = NULL;
return false;
}
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
if (!atombios_crtc_prepare_pll(crtc, adjusted_mode))
return false;
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
/* if we can't get a PPLL for a non-DP encoder, fail */
if ((radeon_crtc->pll_id == ATOM_PPLL_INVALID) &&
!ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder)))
return false;
 
return true;
}
 
1550,10 → 1823,15
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* pick pll */
radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
radeon_crtc->in_mode_set = true;
 
/* disable crtc pair power gating before programming */
if (ASIC_IS_DCE6(rdev))
atombios_powergate_crtc(crtc, ATOM_DISABLE);
 
atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
1560,17 → 1838,35
 
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
atombios_lock_crtc(crtc, ATOM_DISABLE);
radeon_crtc->in_mode_set = false;
}
 
static void atombios_crtc_disable(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_atom_ss ss;
int i;
 
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
 
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i] &&
rdev->mode_info.crtcs[i]->enabled &&
i != radeon_crtc->crtc_id &&
radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) {
/* another crtc is using this pll; don't turn
* off the pll
*/
goto done;
}
}
 
switch (radeon_crtc->pll_id) {
case ATOM_PPLL1:
case ATOM_PPLL2:
1578,10 → 1874,20
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
case ATOM_PPLL0:
/* disable the ppll */
if (ASIC_IS_DCE61(rdev))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss);
break;
default:
break;
}
radeon_crtc->pll_id = -1;
done:
radeon_crtc->pll_id = ATOM_PPLL_INVALID;
radeon_crtc->adjusted_clock = 0;
radeon_crtc->encoder = NULL;
radeon_crtc->connector = NULL;
}
 
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
1630,6 → 1936,9
else
radeon_crtc->crtc_offset = 0;
}
radeon_crtc->pll_id = -1;
radeon_crtc->pll_id = ATOM_PPLL_INVALID;
radeon_crtc->adjusted_clock = 0;
radeon_crtc->encoder = NULL;
radeon_crtc->connector = NULL;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
/drivers/video/drm/radeon/atombios_dp.c
22,14 → 22,15
*
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
 
#include "atom.h"
#include "atom-bits.h"
#include "drm_dp_helper.h"
#include <drm/drm_dp_helper.h>
 
/* move these to drm_dp_helper.c/h */
#define DP_LINK_CONFIGURATION_SIZE 9
63,12 → 64,12
 
memset(&args, 0, sizeof(args));
 
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
 
memcpy(base, send, send_bytes);
 
args.v1.lpAuxRequest = 0;
args.v1.lpDataOut = 16;
args.v1.lpAuxRequest = 0 + 4;
args.v1.lpDataOut = 16 + 4;
args.v1.ucDataOutLen = 0;
args.v1.ucChannelID = chan->rec.i2c_id;
args.v1.ucDelay = delay / 10;
115,6 → 116,7
u8 msg[20];
int msg_bytes = send_bytes + 4;
u8 ack;
unsigned retry;
 
if (send_bytes > 16)
return -1;
125,13 → 127,15
msg[3] = (msg_bytes << 4) | (send_bytes - 1);
memcpy(&msg[4], send, send_bytes);
 
while (1) {
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, NULL, 0, delay, &ack);
if (ret < 0)
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
break;
return send_bytes;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else
138,7 → 142,7
return -EIO;
}
 
return send_bytes;
return -EIO;
}
 
static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector,
149,6 → 153,7
int msg_bytes = 4;
u8 ack;
int ret;
unsigned retry;
 
msg[0] = address;
msg[1] = address >> 8;
155,20 → 160,24
msg[2] = AUX_NATIVE_READ << 4;
msg[3] = (msg_bytes << 4) | (recv_bytes - 1);
 
while (1) {
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus,
msg, msg_bytes, recv, recv_bytes, delay, &ack);
if (ret == 0)
return -EPROTO;
if (ret < 0)
if (ret == -EBUSY)
continue;
else if (ret < 0)
return ret;
if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
return ret;
else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
udelay(400);
else if (ret == 0)
return -EPROTO;
else
return -EIO;
}
 
return -EIO;
}
 
static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector,
232,7 → 241,9
for (retry = 0; retry < 4; retry++) {
ret = radeon_process_aux_ch(auxch,
msg, msg_bytes, reply, reply_bytes, 0, &ack);
if (ret < 0) {
if (ret == -EBUSY)
continue;
else if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
273,7 → 284,7
}
}
 
DRM_ERROR("aux i2c too many retries, giving up\n");
DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
return -EREMOTEIO;
}
 
450,7 → 461,7
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int max_link_rate = dp_get_max_link_rate(dpcd);
int max_lane_num = dp_get_max_lane_number(dpcd);
int lane_num;
469,10 → 480,11
u8 dpcd[DP_DPCD_SIZE],
int pix_clock)
{
int bpp = convert_bpc_to_bpp(connector->display_info.bpc);
int bpp = convert_bpc_to_bpp(radeon_get_monitor_bpc(connector));
int lane_num, max_pix_clock;
 
if (radeon_connector_encoder_is_dp_bridge(connector))
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
ENCODER_OBJECT_ID_NUTMEG)
return 270000;
 
lane_num = radeon_dp_get_dp_lane_number(connector, dpcd, pix_clock);
519,6 → 531,23
dig_connector->dp_i2c_bus->rec.i2c_id, 0);
}
 
static void radeon_dp_probe_oui(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 buf[3];
 
if (!(dig_connector->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
return;
 
if (radeon_dp_aux_native_read(radeon_connector, DP_SINK_OUI, buf, 3, 0))
DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
 
if (radeon_dp_aux_native_read(radeon_connector, DP_BRANCH_OUI, buf, 3, 0))
DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
buf[0], buf[1], buf[2]);
}
 
bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
532,6 → 561,9
for (i = 0; i < 8; i++)
DRM_DEBUG_KMS("%02x ", msg[i]);
DRM_DEBUG_KMS("\n");
 
radeon_dp_probe_oui(radeon_connector);
 
return true;
}
dig_connector->dpcd[0] = 0;
538,26 → 570,41
return false;
}
 
static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector);
u8 tmp;
 
if (!ASIC_IS_DCE4(rdev))
return;
return panel_mode;
 
if (radeon_connector_encoder_is_dp_bridge(connector))
if (dp_bridge != ENCODER_OBJECT_ID_NONE) {
/* DP bridge chips */
tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) ||
(dp_bridge == ENCODER_OBJECT_ID_TRAVIS))
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
else
panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
} else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
/* eDP */
tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
 
atombios_dig_encoder_setup(encoder,
ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
panel_mode);
return panel_mode;
}
 
void radeon_dp_set_link_config(struct drm_connector *connector,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
603,16 → 650,25
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE, 100);
if (ret <= 0) {
DRM_ERROR("displayport link status failed\n");
return false;
}
 
DRM_DEBUG_KMS("link status %02x %02x %02x %02x %02x %02x\n",
link_status[0], link_status[1], link_status[2],
link_status[3], link_status[4], link_status[5]);
DRM_DEBUG_KMS("link status %*ph\n", 6, link_status);
return true;
}
 
bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
{
u8 link_status[DP_LINK_STATUS_SIZE];
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
if (!radeon_dp_get_link_status(radeon_connector, link_status))
return false;
if (dp_channel_eq_ok(link_status, dig->dp_lane_count))
return false;
return true;
}
 
struct radeon_dp_link_train_info {
struct radeon_device *rdev;
struct drm_encoder *encoder;
679,6 → 735,8
 
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u8 tmp;
 
/* power up the sink */
694,11 → 752,15
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, 0);
 
radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
(dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
}
 
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
if (dp_info->dpcd[0] >= 0x11)
if (dp_info->dpcd[DP_DPCD_REV] >= 0x11 &&
dp_info->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)
tmp |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
radeon_write_dpcd_reg(dp_info->radeon_connector, DP_LANE_COUNT_SET, tmp);
 
764,8 → 826,10
else
mdelay(dp_info->rd_interval * 4);
 
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
 
if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
827,8 → 891,10
else
mdelay(dp_info->rd_interval * 4);
 
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
DRM_ERROR("displayport link status failed\n");
break;
}
 
if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
/drivers/video/drm/radeon/atombios_encoders.c
0,0 → 1,2658
/*
* Copyright 2007-11 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alex Deucher
*/
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
#include <linux/backlight.h>
 
extern int atom_debug;
 
static u8
radeon_atom_get_backlight_level_from_reg(struct radeon_device *rdev)
{
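/* The running backlight level is cached by the BIOS in scratch
 * register 2. With the usual AtomBIOS field layout (mask 0x0000ff00,
 * shift 8 -- an assumption here, check atombios.h), a scratch value
 * of 0x00007a00 decodes to level 0x7a.
 */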
u8 backlight_level;
u32 bios_2_scratch;
 
if (rdev->family >= CHIP_R600)
bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
else
bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
 
backlight_level = ((bios_2_scratch & ATOM_S2_CURRENT_BL_LEVEL_MASK) >>
ATOM_S2_CURRENT_BL_LEVEL_SHIFT);
 
return backlight_level;
}
 
static void
radeon_atom_set_backlight_level_to_reg(struct radeon_device *rdev,
u8 backlight_level)
{
u32 bios_2_scratch;
 
if (rdev->family >= CHIP_R600)
bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
else
bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH);
 
bios_2_scratch &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
bios_2_scratch |= ((backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
ATOM_S2_CURRENT_BL_LEVEL_MASK);
 
if (rdev->family >= CHIP_R600)
WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch);
else
WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch);
}
 
u8
atombios_get_backlight_level(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
 
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return 0;
 
return radeon_atom_get_backlight_level_from_reg(rdev);
}
 
void
atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
{
struct drm_encoder *encoder = &radeon_encoder->base;
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder_atom_dig *dig;
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index;
 
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
 
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) &&
radeon_encoder->enc_priv) {
dig = radeon_encoder->enc_priv;
dig->backlight_level = level;
radeon_atom_set_backlight_level_to_reg(rdev, dig->backlight_level);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
if (dig->backlight_level == 0) {
args.ucAction = ATOM_LCD_BLOFF;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
} else {
args.ucAction = ATOM_LCD_BL_BRIGHTNESS_CONTROL;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->backlight_level == 0)
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
else {
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_BL_BRIGHTNESS_CONTROL, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
}
break;
default:
break;
}
}
}
 
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
static u8 radeon_atom_bl_level(struct backlight_device *bd)
{
u8 level;
 
/* Convert brightness to hardware level */
if (bd->props.brightness < 0)
level = 0;
else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
level = RADEON_MAX_BL_LEVEL;
else
level = bd->props.brightness;
 
return level;
}
 
static int radeon_atom_backlight_update_status(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
struct radeon_encoder *radeon_encoder = pdata->encoder;
 
atombios_set_backlight_level(radeon_encoder, radeon_atom_bl_level(bd));
 
return 0;
}
 
static int radeon_atom_backlight_get_brightness(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
struct radeon_encoder *radeon_encoder = pdata->encoder;
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
 
return radeon_atom_get_backlight_level_from_reg(rdev);
}
 
static const struct backlight_ops radeon_atom_backlight_ops = {
.get_brightness = radeon_atom_backlight_get_brightness,
.update_status = radeon_atom_backlight_update_status,
};
 
void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct backlight_device *bd;
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
struct radeon_encoder_atom_dig *dig;
u8 backlight_level;
char bl_name[16];
 
if (!radeon_encoder->enc_priv)
return;
 
if (!rdev->is_atom_bios)
return;
 
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
 
pdata = kmalloc(sizeof(struct radeon_backlight_privdata), GFP_KERNEL);
if (!pdata) {
DRM_ERROR("Memory allocation failed\n");
goto error;
}
 
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_atom_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
goto error;
}
 
pdata->encoder = radeon_encoder;
 
backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
 
dig = radeon_encoder->enc_priv;
dig->bl_dev = bd;
 
bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
bd->props.power = FB_BLANK_UNBLANK;
backlight_update_status(bd);
 
DRM_INFO("radeon atom DIG backlight initialized\n");
 
return;
 
error:
kfree(pdata);
return;
}
 
static void radeon_atom_backlight_exit(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct backlight_device *bd = NULL;
struct radeon_encoder_atom_dig *dig;
 
if (!radeon_encoder->enc_priv)
return;
 
if (!rdev->is_atom_bios)
return;
 
if (!(rdev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU))
return;
 
dig = radeon_encoder->enc_priv;
bd = dig->bl_dev;
dig->bl_dev = NULL;
 
if (bd) {
struct radeon_backlight_privdata *pdata;
 
pdata = bl_get_data(bd);
backlight_device_unregister(bd);
kfree(pdata);
 
DRM_INFO("radeon atom LVDS backlight unloaded\n");
}
}
 
#else /* !CONFIG_BACKLIGHT_CLASS_DEVICE */
 
void radeon_atom_backlight_init(struct radeon_encoder *encoder,
struct drm_connector *drm_connector)
{
}
 
static void radeon_atom_backlight_exit(struct radeon_encoder *encoder)
{
}
 
#endif
 
/* evil but including atombios.h is much worse */
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
 
 
static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
return true;
default:
return false;
}
}
 
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
/* hw bug */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
/* get the native mode for LVDS */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT|ATOM_DEVICE_DFP_SUPPORT))
radeon_panel_mode_fixup(encoder, adjusted_mode);
 
/* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
tv_dac->tv_std == TV_STD_NTSC_J ||
tv_dac->tv_std == TV_STD_PAL_M)
radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
}
 
if (ASIC_IS_DCE3(rdev) &&
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
 
return true;
}
 
static void
atombios_dac_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
break;
}
 
args.ucAction = action;
 
if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
args.ucDacStandard = ATOM_DAC1_PS2;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.ucDacStandard = ATOM_DAC1_CV;
else {
switch (dac_info->tv_std) {
case TV_STD_PAL:
case TV_STD_PAL_M:
case TV_STD_SCART_PAL:
case TV_STD_SECAM:
case TV_STD_PAL_CN:
args.ucDacStandard = ATOM_DAC1_PAL;
break;
case TV_STD_NTSC:
case TV_STD_NTSC_J:
case TV_STD_PAL_60:
default:
args.ucDacStandard = ATOM_DAC1_NTSC;
break;
}
}
args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
static void
atombios_tv_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
TV_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
 
memset(&args, 0, sizeof(args));
 
index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
 
args.sTVEncoder.ucAction = action;
 
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
else {
switch (dac_info->tv_std) {
case TV_STD_NTSC:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
break;
case TV_STD_PAL:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
break;
case TV_STD_PAL_M:
args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
break;
case TV_STD_PAL_60:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
break;
case TV_STD_NTSC_J:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
break;
case TV_STD_SCART_PAL:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
break;
case TV_STD_SECAM:
args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
break;
case TV_STD_PAL_CN:
args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
break;
default:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
break;
}
}
 
args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
{
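/* Map the connector's reported bits per color onto the AtomBIOS
 * PANEL_*BIT_PER_COLOR encoding; unknown values fall back to 8 bpc.
 */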
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 8;
 
if (connector)
bpc = radeon_get_monitor_bpc(connector);
 
switch (bpc) {
case 0:
return PANEL_BPC_UNDEFINE;
case 6:
return PANEL_6BIT_PER_COLOR;
case 8:
default:
return PANEL_8BIT_PER_COLOR;
case 10:
return PANEL_10BIT_PER_COLOR;
case 12:
return PANEL_12BIT_PER_COLOR;
case 16:
return PANEL_16BIT_PER_COLOR;
}
}
 
 
union dvo_encoder_control {
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
};
 
void
atombios_dvo_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
union dvo_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
uint8_t frev, crev;
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
/* some R4xx chips have the wrong frev */
if (rdev->family <= CHIP_RV410)
frev = 1;
 
switch (frev) {
case 1:
switch (crev) {
case 1:
/* R4xx, R5xx */
args.ext_tmds.sXTmdsEncoder.ucEnable = action;
 
if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
 
args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
break;
case 2:
/* RS600/690/740 */
args.dvo.sDVOEncoder.ucAction = action;
args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
/* DFP1, CRT1, TV1 depending on the type of port */
args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
 
if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
break;
case 3:
/* R6xx */
args.dvo_v3.ucAction = action;
args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.dvo_v3.ucDVOConfig = 0; /* XXX */
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
union lvds_encoder_control {
LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
 
void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
 
if (!dig)
return;
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
hdmi_detected = 1;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
else
index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
break;
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
case 2:
switch (crev) {
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
if (hdmi_detected)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
} else {
if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
/*if (pScrn->rgbBits == 8) */
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
break;
case 2:
case 3:
args.v2.ucMisc = 0;
args.v2.ucAction = action;
if (crev == 3) {
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
if (hdmi_detected)
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
args.v2.ucSpatial = 0;
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
if (dig->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
 
/* dp bridges are always DP */
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)
return ATOM_ENCODER_MODE_DP;
 
/* DVO is always DVO */
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DVO1) ||
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1))
return ATOM_ENCODER_MODE_DVO;
 
connector = radeon_get_connector_for_encoder(encoder);
/* if we don't have an active device yet, just use one of
* the connectors tied to the encoder.
*/
if (!connector)
connector = radeon_get_connector_for_encoder_init(encoder);
radeon_connector = to_radeon_connector(connector);
 
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
radeon_audio)
return ATOM_ENCODER_MODE_HDMI;
else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
break;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */
return ATOM_ENCODER_MODE_TV;
/*return ATOM_ENCODER_MODE_CV;*/
break;
}
}
 
/*
* DIG Encoder/Transmitter Setup
*
* DCE 3.0/3.1
* - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
* Supports up to 3 digital outputs
* - 2 DIG encoder blocks.
* DIG1 can drive UNIPHY link A or link B
* DIG2 can drive UNIPHY link B or LVTMA
*
* DCE 3.2
* - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
* Supports up to 5 digital outputs
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
* DCE 4.0/5.0/6.0
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 6 DIG encoder blocks.
* - DIG to PHY mapping is hardcoded
* DIG1 drives UNIPHY0 link A, A+B
* DIG2 drives UNIPHY0 link B
* DIG3 drives UNIPHY1 link A, A+B
* DIG4 drives UNIPHY1 link B
* DIG5 drives UNIPHY2 link A, A+B
* DIG6 drives UNIPHY2 link B
*
* DCE 4.1
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 2 DIG encoder blocks.
* llano
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
* ontario
* DIG1 drives UNIPHY0/1/2 link A
* DIG2 drives UNIPHY0/1/2 link B
*
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
* crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
* crtc1 -> dig1 -> UNIPHY0 link B -> DP
* crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
* crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
*/
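/*
 * Illustration only (not part of the driver): on DCE 4.0/5.0/6.0 the
 * hardcoded table above reduces to "DIG block n drives UNIPHY n/2,
 * link B when n is odd", with n the zero-based DIG index (DIG1 == 0):
 *
 * static int dce4_dig_to_uniphy(int dig) { return dig / 2; }
 * static bool dce4_dig_is_linkb(int dig) { return dig & 1; }
 *
 * The transmitter setup below applies the same parity test
 * (dig_encoder & 1) when it picks ucEncoderSel.
 */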
 
union dig_encoder_control {
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};
 
void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_encoder_control args;
int index = 0;
uint8_t frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int hpd_id = RADEON_HPD_NONE;
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
}
 
/* no dig encoder assigned */
if (dig->dig_encoder == -1)
return;
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE4(rdev))
index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
else {
if (dig->dig_encoder)
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
else
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
switch (crev) {
case 1:
args.v1.ucAction = action;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
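/* ucPanelMode only exists in the v3 layout; the write through
 * args.v3 below relies on the union overlay */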
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v3.ucPanelMode = panel_mode;
else
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
args.v1.ucLaneNum = dp_lane_count;
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
 
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
if (dig->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
break;
case 2:
case 3:
args.v3.ucAction = action;
args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v3.ucPanelMode = panel_mode;
else
args.v3.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode))
args.v3.ucLaneNum = dp_lane_count;
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
 
if (ENCODER_MODE_IS_DP(args.v3.ucEncoderMode) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
args.v3.ucBitPerColor = radeon_atom_get_bpc(encoder);
break;
case 4:
args.v4.ucAction = action;
args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v4.ucPanelMode = panel_mode;
else
args.v4.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode))
args.v4.ucLaneNum = dp_lane_count;
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.ucLaneNum = 8;
else
args.v4.ucLaneNum = 4;
 
if (ENCODER_MODE_IS_DP(args.v4.ucEncoderMode)) {
if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
}
args.v4.acConfig.ucDigSel = dig->dig_encoder;
args.v4.ucBitPerColor = radeon_atom_get_bpc(encoder);
if (hpd_id == RADEON_HPD_NONE)
args.v4.ucHPD_ID = 0;
else
args.v4.ucHPD_ID = hpd_id + 1;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V1_5 v5;
};
 
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector;
union dig_transmitter_control args;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
int igp_lane_info = 0;
int dig_encoder = dig->dig_encoder;
int hpd_id = RADEON_HPD_NONE;
 
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
connector = radeon_get_connector_for_encoder_init(encoder);
/* just needed to avoid bailing in the encoder check. the encoder
* isn't used for init
*/
dig_encoder = 0;
} else
connector = radeon_get_connector_for_encoder(encoder);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
hpd_id = radeon_connector->hpd.hpd;
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
igp_lane_info = dig_connector->igp_lane_info;
}
 
if (encoder->crtc) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
pll_id = radeon_crtc->pll_id;
}
 
/* no dig encoder assigned */
if (dig_encoder == -1)
return;
 
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)))
is_dp = true;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
break;
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
switch (crev) {
case 1:
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v1.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
 
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
 
if (dig_encoder)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
if (is_dp ||
!radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
if (igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (igp_lane_info & 0x2)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
else if (igp_lane_info & 0x4)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
else if (igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
if (igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
}
 
if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
 
if (is_dp)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
}
break;
case 2:
args.v2.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v2.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v2.asMode.ucLaneSel = lane_num;
args.v2.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v2.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
 
args.v2.acConfig.ucEncoderSel = dig_encoder;
if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v2.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v2.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v2.acConfig.ucTransmitterSel = 2;
break;
}
 
if (is_dp) {
args.v2.acConfig.fCoherentMode = 1;
args.v2.acConfig.fDPConnector = 1;
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v2.acConfig.fDualLinkConnector = 1;
}
break;
case 3:
args.v3.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v3.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v3.asMode.ucLaneSel = lane_num;
args.v3.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v3.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
 
if (is_dp)
args.v3.ucLaneNum = dp_lane_count;
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
 
if (dig->linkb)
args.v3.acConfig.ucLinkSel = 1;
if (dig_encoder & 1)
args.v3.acConfig.ucEncoderSel = 1;
 
/* Select the PLL for the PHY
* DP PHY should be clocked from external src if there is
* one.
*/
/* On DCE4, if there is an external clock, it generates the DP ref clock */
if (is_dp && rdev->clock.dp_extclk)
args.v3.acConfig.ucRefClkSource = 2; /* external src */
else
args.v3.acConfig.ucRefClkSource = pll_id;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v3.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v3.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v3.acConfig.ucTransmitterSel = 2;
break;
}
 
if (is_dp)
args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v3.acConfig.fCoherentMode = 1;
if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.acConfig.fDualLinkConnector = 1;
}
break;
case 4:
args.v4.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v4.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v4.asMode.ucLaneSel = lane_num;
args.v4.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v4.usPixelClock = cpu_to_le16(dp_clock / 10);
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
 
if (is_dp)
args.v4.ucLaneNum = dp_lane_count;
else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.ucLaneNum = 8;
else
args.v4.ucLaneNum = 4;
 
if (dig->linkb)
args.v4.acConfig.ucLinkSel = 1;
if (dig_encoder & 1)
args.v4.acConfig.ucEncoderSel = 1;
 
/* Select the PLL for the PHY
* DP PHY should be clocked from external src if there is
* one.
*/
/* On DCE5 DCPLL usually generates the DP ref clock */
if (is_dp) {
if (rdev->clock.dp_extclk)
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
else
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
} else
args.v4.acConfig.ucRefClkSource = pll_id;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v4.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v4.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v4.acConfig.ucTransmitterSel = 2;
break;
}
 
if (is_dp)
args.v4.acConfig.fCoherentMode = 1; /* DP requires coherent */
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v4.acConfig.fCoherentMode = 1;
if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.acConfig.fDualLinkConnector = 1;
}
break;
case 5:
args.v5.ucAction = action;
if (is_dp)
args.v5.usSymClock = cpu_to_le16(dp_clock / 10);
else
args.v5.usSymClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYB;
else
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYA;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYD;
else
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYC;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYF;
else
args.v5.ucPhyId = ATOM_PHY_ID_UNIPHYE;
break;
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
args.v5.ucConnObjId = connector_object_id;
args.v5.ucDigMode = atombios_get_encoder_mode(encoder);
 
if (is_dp && rdev->clock.dp_extclk)
args.v5.asConfig.ucPhyClkSrcId = ENCODER_REFCLK_SRC_EXTCLK;
else
args.v5.asConfig.ucPhyClkSrcId = pll_id;
 
if (is_dp)
args.v5.asConfig.ucCoherentMode = 1; /* DP requires coherent */
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v5.asConfig.ucCoherentMode = 1;
}
if (hpd_id == RADEON_HPD_NONE)
args.v5.asConfig.ucHPDSel = 0;
else
args.v5.asConfig.ucHPDSel = hpd_id + 1;
args.v5.ucDigEncoderSel = 1 << dig_encoder;
args.v5.ucDPLaneSet = lane_set;
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
bool
atombios_set_edp_panel_power(struct drm_connector *connector, int action)
{
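/* eDP panels need explicit power sequencing on DCE4+: run the UNIPHY
 * transmitter control table with a POWER_ON/POWER_OFF action and,
 * after power-on, poll hot-plug sense for up to ~300 ms until the
 * panel reports ready.
 */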
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
union dig_transmitter_control args;
int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
uint8_t frev, crev;
 
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
goto done;
 
if (!ASIC_IS_DCE4(rdev))
goto done;
 
if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
(action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
goto done;
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
goto done;
 
memset(&args, 0, sizeof(args));
 
args.v1.ucAction = action;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
int i;
 
for (i = 0; i < 300; i++) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
return true;
mdelay(1);
}
return false;
}
done:
return true;
}
 
union external_encoder_control {
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
 
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
struct drm_encoder *ext_encoder,
int action)
{
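/* Program external (off-chip) encoders such as the DP bridge chips
 * (e.g. NUTMEG DP->VGA, TRAVIS DP->LVDS) through the
 * ExternalEncoderControl table; the table's frev/crev selects the
 * parameter layout.
 */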
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
union external_encoder_control args;
struct drm_connector *connector;
int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
u8 frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
 
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
connector = radeon_get_connector_for_encoder_init(encoder);
else
connector = radeon_get_connector_for_encoder(encoder);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
}
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
/* no params on frev 1 */
break;
case 2:
switch (crev) {
case 1:
case 2:
args.v1.sDigEncoder.ucAction = action;
args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (ENCODER_MODE_IS_DP(args.v1.sDigEncoder.ucEncoderMode)) {
if (dp_clock == 270000)
args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.sDigEncoder.ucLaneNum = 8;
else
args.v1.sDigEncoder.ucLaneNum = 4;
break;
case 3:
args.v3.sExtEncoder.ucAction = action;
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
else
args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (ENCODER_MODE_IS_DP(args.v3.sExtEncoder.ucEncoderMode)) {
if (dp_clock == 270000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
} else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.sExtEncoder.ucLaneNum = 8;
else
args.v3.sExtEncoder.ucLaneNum = 4;
switch (ext_enum) {
case GRAPH_OBJECT_ENUM_ID1:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
break;
case GRAPH_OBJECT_ENUM_ID2:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
break;
case GRAPH_OBJECT_ENUM_ID3:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
break;
}
args.v3.sExtEncoder.ucBitPerColor = radeon_atom_get_bpc(encoder);
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void
atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
ENABLE_YUV_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
uint32_t temp, reg;
 
memset(&args, 0, sizeof(args));
 
if (rdev->family >= CHIP_R600)
reg = R600_BIOS_3_SCRATCH;
else
reg = RADEON_BIOS_3_SCRATCH;
 
/* XXX: fix up scratch reg handling */
temp = RREG32(reg);
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
WREG32(reg, (ATOM_S3_TV1_ACTIVE |
(radeon_crtc->crtc_id << 18)));
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
else
WREG32(reg, 0);
 
if (enable)
args.ucEnable = ATOM_ENABLE;
args.ucCRTC = radeon_crtc->crtc_id;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
WREG32(reg, temp);
}
 
static void
radeon_atom_encoder_dpms_avivo(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
break;
default:
return;
}
 
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
/* workaround for DVOOutputControl on some RS690 systems */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) {
u32 reg = RREG32(RADEON_BIOS_3_SCRATCH);
WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE);
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
WREG32(RADEON_BIOS_3_SCRATCH, reg);
} else
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
args.ucAction = ATOM_DISABLE;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLOFF;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
}
}
 
static void
radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
{
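/* DPMS for DIG encoders. Power-on runs encoder setup, then
 * transmitter enable, then (for DP) eDP panel power, link training
 * and DP video-on; power-off walks the same steps in reverse. The
 * exact command sequence differs per DCE generation.
 */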
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = NULL;
struct radeon_connector_atom_dig *radeon_dig_connector = NULL;
 
if (connector) {
radeon_connector = to_radeon_connector(connector);
radeon_dig_connector = radeon_connector->con_priv;
}
 
switch (mode) {
case DRM_MODE_DPMS_ON:
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
if (!connector)
dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
else
dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
 
/* setup and enable the encoder */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
atombios_dig_encoder_setup(encoder,
ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
dig->panel_mode);
if (ext_encoder) {
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev))
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
}
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
} else if (ASIC_IS_DCE4(rdev)) {
/* setup and enable the encoder */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
/* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
} else {
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
/* some early dce3.2 boards have a bug in their transmitter control table */
if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_dig_connector->edp_on = true;
}
radeon_dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
} else if (ASIC_IS_DCE4(rdev)) {
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
radeon_dig_connector->edp_on = false;
}
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
}
 
static void
radeon_atom_encoder_dpms_ext(struct drm_encoder *encoder,
struct drm_encoder *ext_encoder,
int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
 
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
}
 
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
 
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
radeon_atom_encoder_dpms_avivo(encoder, mode);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
radeon_atom_encoder_dpms_dig(encoder, mode);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
if (ASIC_IS_DCE5(rdev)) {
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
}
} else if (ASIC_IS_DCE3(rdev))
radeon_atom_encoder_dpms_dig(encoder, mode);
else
radeon_atom_encoder_dpms_avivo(encoder, mode);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (ASIC_IS_DCE5(rdev)) {
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dac_setup(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dac_setup(encoder, ATOM_DISABLE);
break;
}
} else
radeon_atom_encoder_dpms_avivo(encoder, mode);
break;
default:
return;
}
 
if (ext_encoder)
radeon_atom_encoder_dpms_ext(encoder, ext_encoder, mode);
 
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
}
 
union crtc_source_param {
SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
 
static void
atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
{
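/* Program the SelectCRTC_Source table so the BIOS routes the CRTC
 * driving this encoder to it; the v2 layout additionally carries the
 * encoder mode and DIG encoder ID.
 */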
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
union crtc_source_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
switch (crev) {
case 1:
default:
if (ASIC_IS_AVIVO(rdev))
args.v1.ucCRTC = radeon_crtc->crtc_id;
else {
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
args.v1.ucCRTC = radeon_crtc->crtc_id;
} else {
args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
}
}
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
break;
}
break;
case 2:
args.v2.ucCRTC = radeon_crtc->crtc_id;
if (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS;
else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA)
args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT;
else
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
} else
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = radeon_encoder->enc_priv;
switch (dig->dig_encoder) {
case 0:
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
break;
case 1:
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case 2:
args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
break;
case 3:
args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
break;
case 4:
args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
break;
case 5:
args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
break;
}
break;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* update scratch regs with new routing */
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
 
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
(dev->pdev->subsystem_device == 0x0080)) {
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
 
lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
 
WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
}
}
 
/* the set scaler call clears this on some chips, so program it again */
if (ASIC_IS_AVIVO(rdev) &&
(!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
if (ASIC_IS_DCE4(rdev)) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
EVERGREEN_INTERLEAVE_EN);
else
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
} else {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
AVIVO_D1MODE_INTERLEAVE_EN);
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
}
}
}
 
static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t dig_enc_in_use = 0;
 
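/* each UNIPHY block has two links (A and B) that map to fixed DIG encoders:
 * UNIPHY0 A/B -> DIG0/DIG1, UNIPHY1 A/B -> DIG2/DIG3, UNIPHY2 A/B -> DIG4/DIG5;
 * the switches below encode exactly this mapping */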
if (ASIC_IS_DCE6(rdev)) {
/* DCE6 */
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return 5;
else
return 4;
break;
}
} else if (ASIC_IS_DCE4(rdev)) {
/* DCE4/5 */
if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) {
/* ontario follows DCE4 */
if (rdev->family == CHIP_PALM) {
if (dig->linkb)
return 1;
else
return 0;
} else
/* llano follows DCE3.2 */
return radeon_crtc->crtc_id;
} else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return 5;
else
return 4;
break;
}
}
}
 
/* on DCE3.2 an encoder can drive any DIG block, so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
return radeon_crtc->crtc_id;
}
 
/* on DCE3 - LVTMA can only be driven by DIGB */
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_test_encoder;
 
if (encoder == test_encoder)
continue;
 
if (!radeon_encoder_is_digital(test_encoder))
continue;
 
radeon_test_encoder = to_radeon_encoder(test_encoder);
dig = radeon_test_encoder->enc_priv;
 
if (dig->dig_encoder >= 0)
dig_enc_in_use |= (1 << dig->dig_encoder);
}
 
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
if (dig_enc_in_use & 0x2)
DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
return 1;
}
if (!(dig_enc_in_use & 1))
return 0;
return 1;
}
 
/* This only needs to be called once at startup */
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_encoder *encoder;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
break;
default:
break;
}
 
if (ext_encoder && (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)))
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
}
}
 
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
radeon_encoder->pixel_clock = adjusted_mode->clock;
 
/* need to call this here rather than in prepare() since we need some crtc info */
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
else
atombios_yuv_setup(encoder, false);
}
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_ENABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
atombios_tv_setup(encoder, ATOM_ENABLE);
else
atombios_tv_setup(encoder, ATOM_DISABLE);
}
break;
}
 
atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
if (ASIC_IS_DCE6(rdev))
; /* TODO (use pointers instead of if-s?) */
else if (ASIC_IS_DCE4(rdev))
evergreen_hdmi_setmode(encoder, adjusted_mode);
else
r600_hdmi_setmode(encoder, adjusted_mode);
}
}
 
static bool
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
ATOM_DEVICE_CV_SUPPORT |
ATOM_DEVICE_CRT_SUPPORT)) {
DAC_LOAD_DETECTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
uint8_t frev, crev;
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return false;
 
args.sDacload.ucMisc = 0;
 
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
args.sDacload.ucDacType = ATOM_DAC_A;
else
args.sDacload.ucDacType = ATOM_DAC_B;
 
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
if (crev >= 3)
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
if (crev >= 3)
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
return true;
} else
return false;
}
 
static enum drm_connector_status
radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
uint32_t bios_0_scratch;
 
if (!atombios_dac_load_detect(encoder, connector)) {
DRM_DEBUG_KMS("detect returned false \n");
return connector_status_unknown;
}
 
if (rdev->family >= CHIP_R600)
bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
else
bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
 
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
 
static enum drm_connector_status
radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
u32 bios_0_scratch;
 
if (!ASIC_IS_DCE4(rdev))
return connector_status_unknown;
 
if (!ext_encoder)
return connector_status_unknown;
 
if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
return connector_status_unknown;
 
/* load detect on the dp bridge */
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
 
bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
 
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
 
void
radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
{
struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder);
 
if (ext_encoder)
/* ddc_setup on the dp bridge */
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
 
}
 
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_device *rdev = encoder->dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if ((radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) !=
ENCODER_OBJECT_ID_NONE)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (dig) {
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
if (radeon_encoder->active_device & ATOM_DEVICE_DFP_SUPPORT) {
if (rdev->family >= CHIP_R600)
dig->afmt = rdev->mode_info.afmt[dig->dig_encoder];
else
/* RS600/690/740 have only 1 afmt block */
dig->afmt = rdev->mode_info.afmt[0];
}
}
}
 
radeon_atom_output_lock(encoder, true);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
/* select the clock/data port if it uses a router */
if (radeon_connector->router.cd_valid)
radeon_router_select_cd_port(radeon_connector);
 
/* turn eDP panel on for mode set */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
}
 
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
}
 
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
/* need to call this here as we need the crtc set up */
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
radeon_atom_output_lock(encoder, false);
}
 
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
 
/* check for pre-DCE3 cards with shared encoders;
* can't really use the links individually, so don't disable
* the encoder if it's in use by another connector
*/
if (!ASIC_IS_DCE3(rdev)) {
struct drm_encoder *other_encoder;
struct radeon_encoder *other_radeon_encoder;
 
list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
other_radeon_encoder = to_radeon_encoder(other_encoder);
if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
drm_helper_encoder_in_use(other_encoder))
goto disable_done;
}
}
 
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
/* handled in dpms */
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_DISABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
atombios_tv_setup(encoder, ATOM_DISABLE);
break;
}
 
disable_done:
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
radeon_encoder->active_device = 0;
}
 
/* these are handled by the primary encoders */
static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
{
 
}
 
static void radeon_atom_ext_commit(struct drm_encoder *encoder)
{
 
}
 
static void
radeon_atom_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
 
}
 
static void radeon_atom_ext_disable(struct drm_encoder *encoder)
{
 
}
 
static void
radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
{
 
}
 
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
 
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
.dpms = radeon_atom_ext_dpms,
.mode_fixup = radeon_atom_ext_mode_fixup,
.prepare = radeon_atom_ext_prepare,
.mode_set = radeon_atom_ext_mode_set,
.commit = radeon_atom_ext_commit,
.disable = radeon_atom_ext_disable,
/* no detect for TMDS/LVDS yet */
};
 
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
.prepare = radeon_atom_encoder_prepare,
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.disable = radeon_atom_encoder_disable,
.detect = radeon_atom_dig_detect,
};
 
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
.prepare = radeon_atom_encoder_prepare,
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.detect = radeon_atom_dac_detect,
};
 
void radeon_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
radeon_atom_backlight_exit(radeon_encoder);
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
}
 
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
.destroy = radeon_enc_destroy,
};
 
static struct radeon_encoder_atom_dac *
radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
 
if (!dac)
return NULL;
 
dac->tv_std = radeon_atombios_get_tv_info(rdev);
return dac;
}
 
static struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
if (!dig)
return NULL;
 
/* coherent mode by default */
dig->coherent_mode = true;
dig->dig_encoder = -1;
 
dig->linkb = (encoder_enum == 2);
 
return dig;
}
 
void
radeon_add_atom_encoder(struct drm_device *dev,
uint32_t encoder_enum,
uint32_t supported_device,
u16 caps)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
 
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
 
}
 
/* add a new one */
radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
if (!radeon_encoder)
return;
 
encoder = &radeon_encoder->base;
switch (rdev->num_crtc) {
case 1:
encoder->possible_crtcs = 0x1;
break;
case 2:
default:
encoder->possible_crtcs = 0x3;
break;
case 4:
encoder->possible_crtcs = 0xf;
break;
case 6:
encoder->possible_crtcs = 0x3f;
break;
}
 
radeon_encoder->enc_priv = NULL;
 
radeon_encoder->encoder_enum = encoder_enum;
radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
radeon_encoder->is_ext_encoder = false;
radeon_encoder->caps = caps;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_SI170B:
case ENCODER_OBJECT_ID_CH7303:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
case ENCODER_OBJECT_ID_TITFP513:
case ENCODER_OBJECT_ID_VT1623:
case ENCODER_OBJECT_ID_HDMI_SI1930:
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
/* these are handled by the primary encoders */
radeon_encoder->is_ext_encoder = true;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
else
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
break;
}
}
/drivers/video/drm/radeon/atombios_i2c.c
0,0 → 1,139
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
 
#define TARGET_HW_I2C_CLOCK 50
 
/* these limits come from ProcessI2cChannelTransaction, not the hw */
#define ATOM_MAX_HW_I2C_WRITE 2
#define ATOM_MAX_HW_I2C_READ 255
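/* the 2 byte write cap comes from passing write data inline through the
 * 16-bit lpI2CDataOut field of the args (see radeon_process_i2c_ch below);
 * reads come back through the atom scratch buffer instead */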
 
static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
u8 slave_addr, u8 flags,
u8 *buf, u8 num)
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction);
unsigned char *base;
u16 out;
 
memset(&args, 0, sizeof(args));
 
base = (unsigned char *)rdev->mode_info.atom_context->scratch;
 
if (flags & HW_I2C_WRITE) {
if (num > ATOM_MAX_HW_I2C_WRITE) {
DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num);
return -EINVAL;
}
memcpy(&out, buf, num);
args.lpI2CDataOut = cpu_to_le16(out);
} else {
if (num > ATOM_MAX_HW_I2C_READ) {
DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num);
return -EINVAL;
}
}
 
args.ucI2CSpeed = TARGET_HW_I2C_CLOCK;
args.ucRegIndex = 0;
args.ucTransBytes = num;
args.ucSlaveAddr = slave_addr << 1;
args.ucLineNumber = chan->rec.i2c_id;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* error */
if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
DRM_DEBUG_KMS("hw_i2c error\n");
return -EIO;
}
 
if (!(flags & HW_I2C_WRITE))
memcpy(buf, base, num);
 
return 0;
}
 
int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct i2c_msg *p;
int i, remaining, current_count, buffer_offset, max_bytes, ret;
u8 buf = 0, flags;
 
/* check for bus probe */
p = &msgs[0];
if ((num == 1) && (p->len == 0)) {
ret = radeon_process_i2c_ch(i2c,
p->addr, HW_I2C_WRITE,
&buf, 1);
if (ret)
return ret;
else
return num;
}
 
for (i = 0; i < num; i++) {
p = &msgs[i];
remaining = p->len;
buffer_offset = 0;
/* max_bytes is a limitation of ProcessI2cChannelTransaction, not the hw */
if (p->flags & I2C_M_RD) {
max_bytes = ATOM_MAX_HW_I2C_READ;
flags = HW_I2C_READ;
} else {
max_bytes = ATOM_MAX_HW_I2C_WRITE;
flags = HW_I2C_WRITE;
}
while (remaining) {
if (remaining > max_bytes)
current_count = max_bytes;
else
current_count = remaining;
ret = radeon_process_i2c_ch(i2c,
p->addr, flags,
&p->buf[buffer_offset], current_count);
if (ret)
return ret;
remaining -= current_count;
buffer_offset += current_count;
}
}
 
return num;
}
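 
/* illustrative sketch (not driver code): how the loop above chunks a large
 * read given ATOM_MAX_HW_I2C_READ = 255. A single 600 byte I2C_M_RD message
 * turns into three ProcessI2cChannelTransaction calls:
 *
 *   remaining = 600, buffer_offset = 0   -> transfer 255 bytes
 *   remaining = 345, buffer_offset = 255 -> transfer 255 bytes
 *   remaining = 90,  buffer_offset = 510 -> transfer 90 bytes
 */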
 
u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
 
/drivers/video/drm/radeon/cayman_blit_shaders.c
24,6 → 24,7
* Alex Deucher <alexander.deucher@amd.com>
*/
 
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/kernel.h>
 
/drivers/video/drm/radeon/display.h
51,6 → 51,10
void (__stdcall *move_cursor)(cursor_t *cursor, int x, int y);
void (__stdcall *restore_cursor)(int x, int y);
void (*disable_mouse)(void);
u32 mask_seqno;
u32 check_mouse;
u32 check_m_pixel;
 
};
 
extern display_t *rdisplay;
/drivers/video/drm/radeon/evergreen.c
24,10 → 24,10
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include <drm/radeon_drm.h>
#include "evergreend.h"
#include "atom.h"
#include "avivod.h"
37,16 → 37,122
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
 
static const u32 crtc_offsets[6] =
{
EVERGREEN_CRTC0_REGISTER_OFFSET,
EVERGREEN_CRTC1_REGISTER_OFFSET,
EVERGREEN_CRTC2_REGISTER_OFFSET,
EVERGREEN_CRTC3_REGISTER_OFFSET,
EVERGREEN_CRTC4_REGISTER_OFFSET,
EVERGREEN_CRTC5_REGISTER_OFFSET
};
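/* per-crtc register apertures: a crtc-relative register is addressed as
 * REG + crtc_offsets[crtc] throughout this file */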
 
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl);
 
void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split)
{
*bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
*bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
*mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
*tile_split = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
switch (*bankw) {
default:
case 1: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_1; break;
case 2: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_2; break;
case 4: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_4; break;
case 8: *bankw = EVERGREEN_ADDR_SURF_BANK_WIDTH_8; break;
}
switch (*bankh) {
default:
case 1: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_1; break;
case 2: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_2; break;
case 4: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_4; break;
case 8: *bankh = EVERGREEN_ADDR_SURF_BANK_HEIGHT_8; break;
}
switch (*mtaspect) {
default:
case 1: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1; break;
case 2: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2; break;
case 4: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4; break;
case 8: *mtaspect = EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8; break;
}
}
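 
/* worked example (for illustration only): tiling_flags carrying bankw = 2 and
 * bankh = 1 decodes above to EVERGREEN_ADDR_SURF_BANK_WIDTH_2 and
 * EVERGREEN_ADDR_SURF_BANK_HEIGHT_1, i.e. the raw 1/2/4/8 counts are rewritten
 * in place as the register-field encodings the surface registers expect */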
 
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
u16 ctl, v;
int err;
 
err = pcie_capability_read_word(rdev->pdev, PCI_EXP_DEVCTL, &ctl);
if (err)
return;
 
v = (ctl & PCI_EXP_DEVCTL_READRQ) >> 12;
 
/* if the bios or OS sets MAX_READ_REQUEST_SIZE to an invalid value, fix it
* to avoid hangs or performance issues
*/
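/* for reference: the READRQ field encodes the request size as 128 << v bytes,
 * so the value 2 written below selects 512 bytes; encodings 6 and 7 are
 * reserved by the PCIe spec, and 0 (128 bytes), though legal, is reported to
 * cause problems on these asics */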
if ((v == 0) || (v == 6) || (v == 7)) {
ctl &= ~PCI_EXP_DEVCTL_READRQ;
ctl |= (2 << 12);
pcie_capability_write_word(rdev->pdev, PCI_EXP_DEVCTL, ctl);
}
}
 
/**
* dce4_wait_for_vblank - vblank wait asic callback.
*
* @rdev: radeon_device pointer
* @crtc: crtc to wait for vblank on
*
* Wait for vblank on the requested crtc (evergreen+).
*/
void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
int i;
 
if (crtc >= rdev->num_crtc)
return;
 
if (RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[crtc]) & EVERGREEN_CRTC_MASTER_EN) {
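/* wait for any in-progress vblank to end, then for the next one to begin,
 * so the caller always sees a fresh vblank boundary */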
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(EVERGREEN_CRTC_STATUS + crtc_offsets[crtc]) & EVERGREEN_CRTC_V_BLANK)
break;
udelay(1);
}
}
}
 
 
/**
* evergreen_page_flip - pageflip callback.
*
* @rdev: radeon_device pointer
* @crtc_id: crtc to cleanup pageflip on
* @crtc_base: new address of the crtc (GPU MC address)
*
* Does the actual pageflip (evergreen+).
* During vblank we take the crtc lock and wait for the update_pending
* bit to go high; when it does, we release the lock and allow the
* double-buffered update to take place.
* Returns the current update pending status.
*/
u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
int i;
 
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
64,7 → 170,11
(u32)crtc_base);
 
/* Wait for update_pending to go high. */
while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
break;
udelay(1);
}
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
/* Unlock the lock, so double-buffering can take place inside vblank */
122,6 → 232,74
return actual_temp * 1000;
}
 
/**
* sumo_pm_init_profile - Initialize power profiles callback.
*
* @rdev: radeon_device pointer
*
* Initialize the power states used in profile mode
* (sumo, trinity, SI).
* Used for profile mode only.
*/
void sumo_pm_init_profile(struct radeon_device *rdev)
{
int idx;
 
/* default */
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
 
/* low,mid sh/mh */
if (rdev->flags & RADEON_IS_MOBILITY)
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
else
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
 
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
 
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
 
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
 
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
 
/* high sh/mh */
idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
rdev->pm.power_state[idx].num_clock_modes - 1;
 
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
rdev->pm.power_state[idx].num_clock_modes - 1;
}
 
/**
* evergreen_pm_misc - set additional pm hw parameters callback.
*
* @rdev: radeon_device pointer
*
* Set non-clock parameters associated with a power state
* (voltage, etc.) (evergreen+).
*/
void evergreen_pm_misc(struct radeon_device *rdev)
{
int req_ps_idx = rdev->pm.requested_power_state_index;
149,6 → 327,13
}
}
 
/**
* evergreen_pm_prepare - pre-power state change callback.
*
* @rdev: radeon_device pointer
*
* Prepare for a power state change (evergreen+).
*/
void evergreen_pm_prepare(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
167,6 → 352,13
}
}
 
/**
* evergreen_pm_finish - post-power state change callback.
*
* @rdev: radeon_device pointer
*
* Clean up after a power state change (evergreen+).
*/
void evergreen_pm_finish(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
185,6 → 377,15
}
}
 
/**
* evergreen_hpd_sense - hpd sense callback.
*
* @rdev: radeon_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Checks if a digital monitor is connected (evergreen+).
* Returns true if connected, false if not connected.
*/
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
221,6 → 422,14
return connected;
}
 
/**
* evergreen_hpd_set_polarity - hpd set polarity callback.
*
* @rdev: radeon_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Set the polarity of the hpd pin (evergreen+).
*/
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
281,10 → 490,19
}
}
 
/**
* evergreen_hpd_init - hpd setup callback.
*
* @rdev: radeon_device pointer
*
* Setup the hpd pins used by the card (evergreen+).
* Enable the pin, set the polarity, and enable the hpd interrupts.
*/
void evergreen_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned enabled = 0;
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) |
DC_HPDx_RX_INT_TIMER(0xfa) | DC_HPDx_EN;
 
293,40 → 511,44
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
rdev->irq.hpd[3] = true;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
rdev->irq.hpd[5] = true;
break;
default:
break;
}
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
enabled |= 1 << radeon_connector->hpd.hpd;
}
if (rdev->irq.installed)
evergreen_irq_set(rdev);
// radeon_irq_kms_enable_hpd(rdev, enabled);
}
 
/**
* evergreen_hpd_fini - hpd tear down callback.
*
* @rdev: radeon_device pointer
*
* Tear down the hpd pins used by the card (evergreen+).
* Disable the hpd interrupts.
*/
void evergreen_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned disabled = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
333,32 → 555,28
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
rdev->irq.hpd[3] = false;
break;
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
rdev->irq.hpd[5] = false;
break;
default:
break;
}
disabled |= 1 << radeon_connector->hpd.hpd;
}
// radeon_irq_kms_disable_hpd(rdev, disabled);
}
 
/* watermark setup */
437,7 → 655,7
return 0;
}
 
static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
{
u32 tmp = RREG32(MC_SHARED_CHMAP);
 
789,6 → 1007,14
 
}
 
/**
* evergreen_bandwidth_update - update display watermarks callback.
*
* @rdev: radeon_device pointer
*
* Update the display watermarks based on the requested mode(s)
* (evergreen+).
*/
void evergreen_bandwidth_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
812,6 → 1038,15
}
}
 
/**
* evergreen_mc_wait_for_idle - wait for MC idle callback.
*
* @rdev: radeon_device pointer
*
* Wait for the MC (memory controller) to be idle.
* (evergreen+).
* Returns 0 if the MC is idle, -1 if not.
*/
int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
853,12 → 1088,12
}
}
 
int evergreen_pcie_gart_enable(struct radeon_device *rdev)
static int evergreen_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
if (rdev->gart.table.vram.robj == NULL) {
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
885,6 → 1120,11
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
if ((rdev->family == CHIP_JUNIPER) ||
(rdev->family == CHIP_CYPRESS) ||
(rdev->family == CHIP_HEMLOCK) ||
(rdev->family == CHIP_BARTS))
WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
900,14 → 1140,16
WREG32(VM_CONTEXT1_CNTL, 0);
 
evergreen_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
void evergreen_pcie_gart_disable(struct radeon_device *rdev)
static void evergreen_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
927,17 → 1169,10
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
radeon_gart_table_vram_unpin(rdev);
}
}
}
 
void evergreen_pcie_gart_fini(struct radeon_device *rdev)
static void evergreen_pcie_gart_fini(struct radeon_device *rdev)
{
evergreen_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
945,7 → 1180,7
}
 
 
void evergreen_agp_enable(struct radeon_device *rdev)
static void evergreen_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
 
973,175 → 1208,105
 
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
save->vga_control[0] = RREG32(D1VGA_CONTROL);
save->vga_control[1] = RREG32(D2VGA_CONTROL);
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
 
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (rdev->num_crtc >= 4) {
save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
}
if (rdev->num_crtc >= 6) {
save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
 
/* Stop all video */
/* disable VGA render */
WREG32(VGA_RENDER_CONTROL, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
/* blank the display controllers */
for (i = 0; i < rdev->num_crtc; i++) {
crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
if (crtc_enabled) {
save->crtc_enabled[i] = true;
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_BLANK_DATA_EN)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
if (!(tmp & EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE)) {
radeon_wait_for_vblank(rdev, i);
tmp |= EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
for (j = 0; j < rdev->usec_timeout; j++) {
if (radeon_get_vblank_counter(rdev, i) != frame_count)
break;
udelay(1);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
WREG32(D1VGA_CONTROL, 0);
WREG32(D2VGA_CONTROL, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_D3VGA_CONTROL, 0);
WREG32(EVERGREEN_D4VGA_CONTROL, 0);
radeon_mc_wait_for_idle(rdev);
 
blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
if ((blackout & BLACKOUT_MODE_MASK) != 1) {
/* Block CPU access */
WREG32(BIF_FB_EN, 0);
/* blackout the MC */
blackout &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_D5VGA_CONTROL, 0);
WREG32(EVERGREEN_D6VGA_CONTROL, 0);
}
}
 
void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
u32 tmp, frame_count;
int i, j;
 
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
/* update crtc base addresses */
for (i = 0; i < rdev->num_crtc; i++) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
(u32)rdev->mc.vram_start);
 
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
 
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
 
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
(u32)rdev->mc.vram_start);
}
/* unblackout the MC */
tmp = RREG32(MC_SHARED_BLACKOUT_CNTL);
tmp &= ~BLACKOUT_MODE_MASK;
WREG32(MC_SHARED_BLACKOUT_CNTL, tmp);
/* allow CPU access */
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
/* Unlock host access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
/* Restore video state */
WREG32(D1VGA_CONTROL, save->vga_control[0]);
WREG32(D2VGA_CONTROL, save->vga_control[1]);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
for (i = 0; i < rdev->num_crtc; i++) {
if (save->crtc_enabled[i]) {
if (ASIC_IS_DCE6(rdev)) {
tmp = RREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i]);
tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
} else {
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
tmp &= ~EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE;
WREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i], tmp);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
/* wait for the next frame */
frame_count = radeon_get_vblank_counter(rdev, i);
for (j = 0; j < rdev->usec_timeout; j++) {
if (radeon_get_vblank_counter(rdev, i) != frame_count)
break;
udelay(1);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
}
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
}
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
/* Unlock vga access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
 
1188,8 → 1353,11
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
if (rdev->flags & RADEON_IS_IGP) {
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
/* llano/ontario only */
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2)) {
tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
tmp |= ((rdev->mc.vram_start >> 20) & 0xF) << 20;
1224,18 → 1392,36
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 next_rptr;
 
/* set to DX10/11 mode */
radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(rdev, 1);
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
 
if (ring->rptr_save_reg) {
next_rptr = ring->wptr + 3 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
} else if (rdev->wb.enabled) {
next_rptr = ring->wptr + 5 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
radeon_ring_write(ring, next_rptr);
radeon_ring_write(ring, 0);
}
 
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw);
}
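The next_rptr values above are where the CP's read pointer will sit once everything scheduled here has been consumed: the SET_CONFIG_REG write is 3 dwords, the MEM_WRITE is 5, and the INDIRECT_BUFFER packet emitted below is another 4, hence wptr + 3 + 4 and wptr + 5 + 4. A minimal standalone sketch of that accounting (helper and names hypothetical, not part of the driver):

#include <stdio.h>

/* predicted read pointer after an rptr-update packet of rptr_pkt_dw dwords
 * followed by the 4-dword INDIRECT_BUFFER packet */
static unsigned next_rptr(unsigned wptr, unsigned rptr_pkt_dw)
{
return wptr + rptr_pkt_dw + 4;
}

int main(void)
{
printf("%u\n", next_rptr(100, 3)); /* SET_CONFIG_REG path: 107 */
printf("%u\n", next_rptr(100, 5)); /* MEM_WRITE path: 109 */
return 0;
}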
 
 
1273,27 → 1459,28
 
static int evergreen_cp_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
 
r = radeon_ring_lock(rdev, 7);
r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
 
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
 
r = radeon_ring_lock(rdev, evergreen_default_size + 19);
r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
1300,44 → 1487,45
}
 
/* setup clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
for (i = 0; i < evergreen_default_size; i++)
radeon_ring_write(rdev, evergreen_default_state[i]);
radeon_ring_write(ring, evergreen_default_state[i]);
 
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
/* set clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
 
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(ring, 0xc0026f00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
 
/* Clear consts */
radeon_ring_write(rdev, 0xc0036f00);
radeon_ring_write(rdev, 0x00000bc4);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(ring, 0xc0036f00);
radeon_ring_write(ring, 0x00000bc4);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
 
radeon_ring_write(rdev, 0xc0026900);
radeon_ring_write(rdev, 0x00000316);
radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(rdev, 0x00000010); /* */
radeon_ring_write(ring, 0xc0026900);
radeon_ring_write(ring, 0x00000316);
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
 
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, ring);
 
return 0;
}
 
int evergreen_cp_resume(struct radeon_device *rdev)
static int evergreen_cp_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
1355,13 → 1543,14
RREG32(GRBM_SOFT_RESET);
 
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
 
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
1369,7 → 1558,8
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
ring->wptr = 0;
WREG32(CP_RB_WPTR, ring->wptr);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
1387,17 → 1577,16
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
 
WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
ring->rptr = RREG32(CP_RB_RPTR);
 
evergreen_cp_start(rdev);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
rdev->cp.ready = false;
ring->ready = false;
return r;
}
return 0;
1406,205 → 1595,10
/*
* Core functions
*/
static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask = 0;
u32 enabled_backends_count = 0;
u32 cur_pipe;
u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
u32 cur_backend = 0;
u32 i;
bool force_no_swizzle;
 
if (num_tile_pipes > EVERGREEN_MAX_PIPES)
num_tile_pipes = EVERGREEN_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > EVERGREEN_MAX_BACKENDS)
num_backends = EVERGREEN_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
 
for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
 
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
 
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
switch (rdev->family) {
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
force_no_swizzle = false;
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_JUNIPER:
case CHIP_BARTS:
default:
force_no_swizzle = true;
break;
}
if (force_no_swizzle) {
bool last_backend_enabled = false;
 
force_no_swizzle = false;
for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
if (((enabled_backends_mask >> i) & 1) == 1) {
if (last_backend_enabled)
force_no_swizzle = true;
last_backend_enabled = true;
} else
last_backend_enabled = false;
}
}
 
switch (num_tile_pipes) {
case 1:
case 3:
case 5:
case 7:
DRM_ERROR("odd number of pipes!\n");
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
swizzle_pipe[3] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
}
break;
}
 
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
 
backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
 
cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
}
 
return backend_map;
}
 
static void evergreen_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
 
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
case 2:
case 3:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
}
 
switch (rdev->family) {
case CHIP_HEMLOCK:
case CHIP_CYPRESS:
case CHIP_BARTS:
tcp_chan_steer_lo = 0x54763210;
tcp_chan_steer_hi = 0x0000ba98;
break;
case CHIP_JUNIPER:
case CHIP_REDWOOD:
case CHIP_CEDAR:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_TURKS:
case CHIP_CAICOS:
default:
tcp_chan_steer_lo = 0x76543210;
tcp_chan_steer_hi = 0x0000ba98;
break;
}
 
WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
 
static void evergreen_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 gb_addr_config;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 gb_backend_map;
u32 grbm_gfx_index;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 sq_config;
1619,6 → 1613,7
u32 sq_stack_resource_mgmt_3;
u32 vgt_cache_invalidation;
u32 hdp_host_path_cntl, tmp;
u32 disabled_rb_mask;
int i, j, num_shader_engines, ps_thread_count;
 
switch (rdev->family) {
1643,6 → 1638,7
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_JUNIPER:
rdev->config.evergreen.num_ses = 1;
1664,6 → 1660,7
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_REDWOOD:
rdev->config.evergreen.num_ses = 1;
1685,6 → 1682,7
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CEDAR:
default:
1707,6 → 1705,7
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PALM:
rdev->config.evergreen.num_ses = 1;
1728,6 → 1727,7
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO:
rdev->config.evergreen.num_ses = 1;
1755,6 → 1755,7
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_SUMO2:
rdev->config.evergreen.num_ses = 1;
1776,6 → 1777,7
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_BARTS:
rdev->config.evergreen.num_ses = 2;
1797,6 → 1799,7
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_TURKS:
rdev->config.evergreen.num_ses = 1;
1818,6 → 1821,7
rdev->config.evergreen.sc_prim_fifo_size = 0x100;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_CAICOS:
rdev->config.evergreen.num_ses = 1;
1839,6 → 1843,7
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
break;
}
 
1853,154 → 1858,16
 
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
evergreen_fix_pci_max_read_req_size(rdev);
 
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
& EVERGREEN_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
& EVERGREEN_MAX_SIMDS_MASK);
 
cc_rb_backend_disable =
BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
& EVERGREEN_MAX_BACKENDS_MASK);
 
 
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
if (rdev->flags & RADEON_IS_IGP)
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2))
mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
switch (rdev->config.evergreen.max_tile_pipes) {
case 1:
default:
gb_addr_config |= NUM_PIPES(0);
break;
case 2:
gb_addr_config |= NUM_PIPES(1);
break;
case 4:
gb_addr_config |= NUM_PIPES(2);
break;
case 8:
gb_addr_config |= NUM_PIPES(3);
break;
}
 
gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
 
if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
gb_addr_config |= ROW_SIZE(2);
else
gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
 
if (rdev->ddev->pdev->device == 0x689e) {
u32 efuse_straps_4;
u32 efuse_straps_3;
u8 efuse_box_bit_131_124;
 
WREG32(RCU_IND_INDEX, 0x204);
efuse_straps_4 = RREG32(RCU_IND_DATA);
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
 
switch(efuse_box_bit_131_124) {
case 0x00:
gb_backend_map = 0x76543210;
break;
case 0x55:
gb_backend_map = 0x77553311;
break;
case 0x56:
gb_backend_map = 0x77553300;
break;
case 0x59:
gb_backend_map = 0x77552211;
break;
case 0x66:
gb_backend_map = 0x77443300;
break;
case 0x99:
gb_backend_map = 0x66552211;
break;
case 0x5a:
gb_backend_map = 0x77552200;
break;
case 0xaa:
gb_backend_map = 0x66442200;
break;
case 0x95:
gb_backend_map = 0x66553311;
break;
default:
DRM_ERROR("bad backend map, using default\n");
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
} else if (rdev->ddev->pdev->device == 0x68b9) {
u32 efuse_straps_3;
u8 efuse_box_bit_127_124;
 
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
 
switch(efuse_box_bit_127_124) {
case 0x0:
gb_backend_map = 0x00003210;
break;
case 0x5:
case 0x6:
case 0x9:
case 0xa:
gb_backend_map = 0x00003311;
break;
default:
DRM_ERROR("bad backend map, using default\n");
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
break;
}
} else {
switch (rdev->family) {
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_BARTS:
gb_backend_map = 0x66442200;
break;
case CHIP_JUNIPER:
gb_backend_map = 0x00002200;
break;
default:
gb_backend_map =
evergreen_get_tile_pipe_to_backend_map(rdev,
rdev->config.evergreen.max_tile_pipes,
rdev->config.evergreen.max_backends,
((EVERGREEN_MAX_BACKENDS_MASK <<
rdev->config.evergreen.max_backends) &
EVERGREEN_MAX_BACKENDS_MASK));
}
}
 
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
2027,47 → 1894,62
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.evergreen.tile_config |= 1 << 4;
else
else {
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.evergreen.tile_config |= 0 << 4;
break;
case 1: /* eight banks */
rdev->config.evergreen.tile_config |= 1 << 4;
break;
case 2: /* sixteen banks */
default:
rdev->config.evergreen.tile_config |= 2 << 4;
break;
}
}
rdev->config.evergreen.tile_config |= 0 << 8;
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
rdev->config.evergreen.tile_config |=
((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
rdev->config.evergreen.tile_config |=
((gb_addr_config & 0x30000000) >> 28) << 12;
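/* Illustrative decode of the tile_config dword assembled above (sketch
 * only; field positions follow the layout comment earlier in this
 * function):
 * num_pipes = tile_config & 0xf; bits 3:0
 * num_banks = (tile_config >> 4) & 0xf; 0 = 4, 1 = 8, 2 = 16
 * group_size = (tile_config >> 8) & 0xf; from BURSTLENGTH
 * row_size = (tile_config >> 12) & 0xf; from GB_ADDR_CONFIG bits 29:28
 */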
 
rdev->config.evergreen.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
 
evergreen_program_channel_remap(rdev);
if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
u32 efuse_straps_4;
u32 efuse_straps_3;
 
num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1;
grbm_gfx_index = INSTANCE_BROADCAST_WRITES;
WREG32(RCU_IND_INDEX, 0x204);
efuse_straps_4 = RREG32(RCU_IND_DATA);
WREG32(RCU_IND_INDEX, 0x203);
efuse_straps_3 = RREG32(RCU_IND_DATA);
tmp = (((efuse_straps_4 & 0xf) << 4) |
((efuse_straps_3 & 0xf0000000) >> 28));
} else {
tmp = 0;
for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
u32 rb_disable_bitmap;
 
for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
u32 rb = cc_rb_backend_disable | (0xf0 << 16);
u32 sp = cc_gc_shader_pipe_config;
u32 gfx = grbm_gfx_index | SE_INDEX(i);
 
if (i == num_shader_engines) {
rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK);
sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK);
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
tmp <<= 4;
tmp |= rb_disable_bitmap;
}
}
/* enabled rbs are just the ones not disabled :) */
disabled_rb_mask = tmp;
 
WREG32(GRBM_GFX_INDEX, gfx);
WREG32(RLC_GFX_INDEX, gfx);
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
WREG32(CC_RB_BACKEND_DISABLE, rb);
WREG32(CC_SYS_RB_BACKEND_DISABLE, rb);
WREG32(GC_USER_RB_BACKEND_DISABLE, rb);
WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
}
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
grbm_gfx_index |= SE_BROADCAST_WRITES;
WREG32(GRBM_GFX_INDEX, grbm_gfx_index);
WREG32(RLC_GFX_INDEX, grbm_gfx_index);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
WREG32(GB_BACKEND_MAP, tmp);
 
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
2095,6 → 1977,9
smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
 
if (rdev->family <= CHIP_SUMO2)
WREG32(SMX_SAR_CTL0, 0x00010000);
 
WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
2260,7 → 2145,9
 
/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
if (rdev->flags & RADEON_IS_IGP)
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2))
tmp = RREG32(FUS_MC_ARB_RAMCFG);
else
tmp = RREG32(MC_ARB_RAMCFG);
2292,12 → 2179,14
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* Setup GPU memory space */
if (rdev->flags & RADEON_IS_IGP) {
if ((rdev->family == CHIP_PALM) ||
(rdev->family == CHIP_SUMO) ||
(rdev->family == CHIP_SUMO2)) {
/* size in bytes on fusion */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
} else {
/* size in MB on evergreen */
/* size in MB on evergreen/cayman/tn */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
}
2308,13 → 2197,11
return 0;
}
 
bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
int r;
 
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
2321,20 → 2208,13
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, &rdev->cp);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
rdev->cp.rptr = RREG32(CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
 
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
2353,6 → 2233,14
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
2390,6 → 2278,14
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
return 0;
}
2403,28 → 2299,22
 
u32 evergreen_get_vblank_counter(struct radeon_device *rdev, int crtc)
{
switch (crtc) {
case 0:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC0_REGISTER_OFFSET);
case 1:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC1_REGISTER_OFFSET);
case 2:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC2_REGISTER_OFFSET);
case 3:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC3_REGISTER_OFFSET);
case 4:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC4_REGISTER_OFFSET);
case 5:
return RREG32(CRTC_STATUS_FRAME_COUNT + EVERGREEN_CRTC5_REGISTER_OFFSET);
default:
if (crtc >= rdev->num_crtc)
return 0;
else
return RREG32(CRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]);
}
}
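The six-way switch collapses into a bounds-checked table lookup. crtc_offsets[] is assumed to be defined near the top of the file as the per-CRTC register offsets in index order, along the lines of:

static const u32 crtc_offsets[6] =
{
EVERGREEN_CRTC0_REGISTER_OFFSET,
EVERGREEN_CRTC1_REGISTER_OFFSET,
EVERGREEN_CRTC2_REGISTER_OFFSET,
EVERGREEN_CRTC3_REGISTER_OFFSET,
EVERGREEN_CRTC4_REGISTER_OFFSET,
EVERGREEN_CRTC5_REGISTER_OFFSET
};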
 
void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
 
if (rdev->family >= CHIP_CAYMAN) {
cayman_cp_int_cntl_setup(rdev, 0,
CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
cayman_cp_int_cntl_setup(rdev, 1, 0);
cayman_cp_int_cntl_setup(rdev, 2, 0);
} else
WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
2449,6 → 2339,8
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
/* only one DAC on DCE6 */
if (!ASIC_IS_DCE6(rdev))
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
 
2470,10 → 2362,12
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
u32 afmt1 = 0, afmt2 = 0, afmt3 = 0, afmt4 = 0, afmt5 = 0, afmt6 = 0;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
2494,38 → 2388,62
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
if (rdev->irq.sw_int) {
DRM_DEBUG("evergreen_irq_set: sw int\n");
afmt1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt2 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt3 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt4 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt5 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
afmt6 = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
 
if (rdev->family >= CHIP_CAYMAN) {
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
} else {
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
}
 
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
rdev->irq.pflip[1]) {
atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("evergreen_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2] ||
rdev->irq.pflip[2]) {
atomic_read(&rdev->irq.pflip[2])) {
DRM_DEBUG("evergreen_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3] ||
rdev->irq.pflip[3]) {
atomic_read(&rdev->irq.pflip[3])) {
DRM_DEBUG("evergreen_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4] ||
rdev->irq.pflip[4]) {
atomic_read(&rdev->irq.pflip[4])) {
DRM_DEBUG("evergreen_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5] ||
rdev->irq.pflip[5]) {
atomic_read(&rdev->irq.pflip[5])) {
DRM_DEBUG("evergreen_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
2553,11 → 2471,36
DRM_DEBUG("evergreen_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
if (rdev->irq.afmt[0]) {
DRM_DEBUG("evergreen_irq_set: hdmi 0\n");
afmt1 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[1]) {
DRM_DEBUG("evergreen_irq_set: hdmi 1\n");
afmt2 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[2]) {
DRM_DEBUG("evergreen_irq_set: hdmi 2\n");
afmt3 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[3]) {
DRM_DEBUG("evergreen_irq_set: hdmi 3\n");
afmt4 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[4]) {
DRM_DEBUG("evergreen_irq_set: hdmi 4\n");
afmt5 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.afmt[5]) {
DRM_DEBUG("evergreen_irq_set: hdmi 5\n");
afmt6 |= AFMT_AZ_FORMAT_WTRIG_MASK;
}
 
if (rdev->family >= CHIP_CAYMAN) {
cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
} else
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
2590,10 → 2533,17
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, afmt1);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, afmt2);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, afmt3);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, afmt4);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
 
return 0;
}
 
static inline void evergreen_irq_ack(struct radeon_device *rdev)
static void evergreen_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
 
2614,6 → 2564,13
rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
 
rdev->irq.stat_regs.evergreen.afmt_status1 = RREG32(AFMT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status2 = RREG32(AFMT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status3 = RREG32(AFMT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status4 = RREG32(AFMT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status5 = RREG32(AFMT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.afmt_status6 = RREG32(AFMT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
 
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
2687,9 → 2644,55
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, tmp);
}
static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, tmp);
}
if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, tmp);
}
}
 
static void evergreen_irq_disable(struct radeon_device *rdev)
{
r600_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
evergreen_irq_ack(rdev);
evergreen_disable_interrupt_state(rdev);
}
 
void evergreen_irq_suspend(struct radeon_device *rdev)
{
evergreen_irq_disable(rdev);
r600_rlc_stop(rdev);
}
 
static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
 
if (rdev->wb.enabled)
2718,22 → 2721,22
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
 
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
 
wptr = evergreen_get_ih_wptr(rdev);
 
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
return IRQ_NONE;
 
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
spin_lock_irqsave(&rdev->ih.lock, flags);
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
return IRQ_NONE;
}
restart_ih:
/* Order reading of wptr vs. reading of IH ring data */
rmb();
 
2740,7 → 2743,6
/* display interrupts */
evergreen_irq_ack(rdev);
 
rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
2953,20 → 2955,80
break;
}
break;
case 44: /* hdmi */
switch (src_data) {
case 0:
if (rdev->irq.stat_regs.evergreen.afmt_status1 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status1 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI0\n");
}
break;
case 1:
if (rdev->irq.stat_regs.evergreen.afmt_status2 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status2 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI1\n");
}
break;
case 2:
if (rdev->irq.stat_regs.evergreen.afmt_status3 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status3 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI2\n");
}
break;
case 3:
if (rdev->irq.stat_regs.evergreen.afmt_status4 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status4 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI3\n");
}
break;
case 4:
if (rdev->irq.stat_regs.evergreen.afmt_status5 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status5 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI4\n");
}
break;
case 5:
if (rdev->irq.stat_regs.evergreen.afmt_status6 & AFMT_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.evergreen.afmt_status6 &= ~AFMT_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI5\n");
}
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev);
if (rdev->family >= CHIP_CAYMAN) {
switch (src_data) {
case 0:
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 1:
radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
break;
case 2:
radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
break;
}
} else
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
// wake_up(&rdev->irq.idle_queue);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2977,24 → 3039,24
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
 
/* make sure wptr hasn't changed while processing */
wptr = evergreen_get_ih_wptr(rdev);
if (wptr != rdev->ih.wptr)
if (wptr != rptr)
goto restart_ih;
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
 
return IRQ_HANDLED;
}
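The rework above replaces the spinlock with a lock-free claim on the IH ring: atomic_xchg takes ownership, and after the lock is dropped the write pointer is re-read so an interrupt that raced in during processing gets a second pass instead of being lost. A standalone sketch of that shape, with C11 atomics standing in for the kernel primitives (all names hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int ih_lock; /* stands in for rdev->ih.lock */
static unsigned hw_wptr; /* pretend hardware write pointer */

static unsigned get_wptr(void) { return hw_wptr; }
static void process_entries(unsigned rptr, unsigned wptr) { (void)rptr; (void)wptr; }

static bool irq_process(unsigned *rptr)
{
unsigned wptr = get_wptr();
restart:
/* somebody else already processing? */
if (atomic_exchange(&ih_lock, 1))
return false;
process_entries(*rptr, wptr);
*rptr = wptr;
atomic_store(&ih_lock, 0);
/* re-check: wptr may have advanced while we worked */
wptr = get_wptr();
if (wptr != *rptr)
goto restart;
return true;
}

int main(void)
{
unsigned rptr = 0;
return irq_process(&rptr) ? 0 : 1;
}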
 
static int evergreen_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* enable pcie gen2 link */
if (!ASIC_IS_DCE5(rdev))
evergreen_pcie_gen2_enable(rdev);
 
if (ASIC_IS_DCE5(rdev)) {
3020,6 → 3082,10
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
evergreen_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
evergreen_agp_enable(rdev);
3032,8 → 3098,8
 
r = evergreen_blit_init(rdev);
if (r) {
evergreen_blit_fini(rdev);
rdev->asic->copy = NULL;
// r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
3051,7 → 3117,9
}
evergreen_irq_set(rdev);
 
r = radeon_ring_init(rdev, rdev->cp.ring_size);
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
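/* The call above passes the writeback slot used to mirror the CP read
 * pointer (RADEON_WB_CP_RPTR_OFFSET), the hardware rptr/wptr registers,
 * a pointer shift and mask (0, 0xfffff), and the nop dword used to pad
 * the ring (RADEON_CP_PACKET2).
 */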
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
3066,8 → 3134,8
 
 
 
#if 0
 
 
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
3088,6 → 3156,7
mutex_unlock(&rdev->r600_blit.mutex);
return 0;
}
#endif
 
/* Plan is to move initialization in that function and use
* helper function so that radeon_device_init pretty much
3099,10 → 3168,6
{
int r;
 
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
3159,8 → 3224,8
if (r)
return r;
 
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
3175,25 → 3240,26
dev_err(rdev->dev, "disabling GPU acceleration\n");
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
 
/* Don't start up if the MC ucode is missing on BTC parts.
* The default clocks and voltages before the MC ucode
* is loaded are not sufficient for advanced operations.
*/
if (ASIC_IS_DCE5(rdev)) {
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
return -EINVAL;
}
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
 
return 0;
}
 
 
static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
{
u32 link_width_cntl, speed_cntl;
u32 link_width_cntl, speed_cntl, mask;
int ret;
 
if (radeon_pcie_gen2 == 0)
return;
3208,7 → 3274,21
if (ASIC_IS_X2(rdev))
return;
 
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret != 0)
return;
 
if (!(mask & DRM_PCIE_SPEED_50))
return;
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}
 
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
(speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
 
/drivers/video/drm/radeon/evergreen_blit_kms.c
24,31 → 24,21
* Alex Deucher <alexander.deucher@amd.com>
*/
 
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
 
#include "evergreend.h"
#include "evergreen_blit_shaders.h"
#include "cayman_blit_shaders.h"
#include "radeon_blit_common.h"
 
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
 
#define FMT_8 0x1
#define FMT_5_6_5 0x8
#define FMT_8_8_8_8 0x1a
#define COLOR_8 0x1
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
 
/* emits 17 */
static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
 
56,27 → 46,29
if (h < 8)
h = 8;
 
cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
cb_color_info = CB_FORMAT(format) |
CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, pitch);
radeon_ring_write(rdev, slice);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, cb_color_info);
radeon_ring_write(rdev, (1 << 4));
radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, pitch);
radeon_ring_write(ring, slice);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, cb_color_info);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
}
 
/* emits 5dw */
85,6 → 77,7
u32 sync_type, u32 size,
u64 mc_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
 
if (size == 0xffffffff)
92,35 → 85,45
else
cp_coher_size = ((size + 255) >> 8);
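/* CP_COHER_SIZE is in 256-byte units: (size + 255) >> 8 is a round-up
 * divide, e.g. 1 -> 1, 256 -> 1, 257 -> 2 chunks.
 */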
 
radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(rdev, sync_type);
radeon_ring_write(rdev, cp_coher_size);
radeon_ring_write(rdev, mc_addr >> 8);
radeon_ring_write(rdev, 10); /* poll interval */
if (rdev->family >= CHIP_CAYMAN) {
/* CP_COHER_CNTL2 has to be set manually when submitting a surface_sync
* to the RB directly. For IBs, the CP programs this as part of the
* surface_sync packet.
*/
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
}
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, sync_type);
radeon_ring_write(ring, cp_coher_size);
radeon_ring_write(ring, mc_addr >> 8);
radeon_ring_write(ring, 10); /* poll interval */
}
 
/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
 
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, 2);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, 2);
radeon_ring_write(ring, 0);
 
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, 1);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 2);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, 1);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 2);
 
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
130,26 → 133,31
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
 
/* high addr, stride */
sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
SQ_VTXC_STRIDE(16);
#ifdef __BIG_ENDIAN
sq_vtx_constant_word2 |= (2 << 30);
sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif
/* xyzw swizzles */
sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
sq_vtx_constant_word3 = SQ_VTCX_SEL_X(SQ_SEL_X) |
SQ_VTCX_SEL_Y(SQ_SEL_Y) |
SQ_VTCX_SEL_Z(SQ_SEL_Z) |
SQ_VTCX_SEL_W(SQ_SEL_W);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(rdev, 0x580);
radeon_ring_write(rdev, gpu_addr & 0xffffffff);
radeon_ring_write(rdev, 48 - 1); /* size */
radeon_ring_write(rdev, sq_vtx_constant_word2);
radeon_ring_write(rdev, sq_vtx_constant_word3);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(ring, 0x580);
radeon_ring_write(ring, gpu_addr & 0xffffffff);
radeon_ring_write(ring, 48 - 1); /* size */
radeon_ring_write(ring, sq_vtx_constant_word2);
radeon_ring_write(ring, sq_vtx_constant_word3);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
 
if ((rdev->family == CHIP_CEDAR) ||
(rdev->family == CHIP_PALM) ||
168,8 → 176,9
static void
set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr)
u64 gpu_addr, u32 size)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_tex_resource_word0, sq_tex_resource_word1;
u32 sq_tex_resource_word4, sq_tex_resource_word7;
 
176,25 → 185,33
if (h < 1)
h = 1;
 
sq_tex_resource_word0 = (1 << 0); /* 2D */
sq_tex_resource_word0 = TEX_DIM(SQ_TEX_DIM_2D);
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
((w - 1) << 18));
sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
sq_tex_resource_word1 = ((h - 1) << 0) |
TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
/* xyzw swizzles */
sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
sq_tex_resource_word4 = TEX_DST_SEL_X(SQ_SEL_X) |
TEX_DST_SEL_Y(SQ_SEL_Y) |
TEX_DST_SEL_Z(SQ_SEL_Z) |
TEX_DST_SEL_W(SQ_SEL_W);
 
sq_tex_resource_word7 = format | (SQ_TEX_VTX_VALID_TEXTURE << 30);
sq_tex_resource_word7 = format |
S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_TEXTURE);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, sq_tex_resource_word0);
radeon_ring_write(rdev, sq_tex_resource_word1);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, sq_tex_resource_word4);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, sq_tex_resource_word7);
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_tex_resource_word0);
radeon_ring_write(ring, sq_tex_resource_word1);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, sq_tex_resource_word4);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_tex_resource_word7);
}
 
/* emits 12 */
202,30 → 219,31
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* workaround some hw bugs */
if (x2 == 0)
x1 = 1;
if (y2 == 0)
y1 = 1;
if (rdev->family == CHIP_CAYMAN) {
if (rdev->family >= CHIP_CAYMAN) {
if ((x2 == 1) && (y2 == 1))
x2 = 2;
}
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
 
/* emits 10 */
232,23 → 250,24
static void
draw_auto(struct radeon_device *rdev)
{
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, DI_PT_RECTLIST);
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, DI_PT_RECTLIST);
 
radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
 
radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1);
radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(ring, 1);
 
radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(rdev, 3);
radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(ring, 3);
radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
}
 
256,6 → 275,7
static void
set_default_state(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
269,8 → 289,8
int dwords;
 
/* set clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
 
if (rdev->family < CHIP_CAYMAN) {
switch (rdev->family) {
527,88 → 547,63
NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
 
/* disable dyn gprs */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0);
 
/* setup LDS */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, 0x10001000);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0x10001000);
 
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(rdev, sq_config);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, sq_thread_resource_mgmt);
radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, sq_config);
radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_thread_resource_mgmt);
radeon_ring_write(ring, sq_thread_resource_mgmt_2);
radeon_ring_write(ring, sq_stack_resource_mgmt_1);
radeon_ring_write(ring, sq_stack_resource_mgmt_2);
radeon_ring_write(ring, sq_stack_resource_mgmt_3);
}
 
/* CONTEXT_CONTROL */
radeon_ring_write(rdev, 0xc0012800);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(ring, 0xc0012800);
radeon_ring_write(ring, 0x80000000);
radeon_ring_write(ring, 0x80000000);
 
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(ring, 0xc0026f00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
 
/* SET_SAMPLER */
radeon_ring_write(rdev, 0xc0036e00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000012);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(ring, 0xc0036e00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000012);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
 
/* set to DX10/11 mode */
radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(rdev, 1);
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
 
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(ring, dwords);
 
}
 
static inline uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;

if ((input & 0x3fff) == 0)
result = 0; /* 0 is a special case */
else {
exponent = 140; /* exponent biased by 127 */
fraction = (input & 0x3fff) << 10; /* cheat and only handle numbers below 2^15 */
for (i = 0; i < 14; i++) {
if (fraction & 0x800000)
break;
else {
fraction = fraction << 1; /* keep shifting left until top bit = 1 */
exponent = exponent - 1;
}
}
result = exponent << 23 | (fraction & 0x7fffff); /* mask off top bit; assumed 1 */
}
return result;
}
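i2f() hand-builds the IEEE-754 single-precision bit pattern for a small integer so vertex coordinates can be written straight into the blit vertex buffer. A standalone check against the compiler's own conversion (host-side sketch; assumes floats are IEEE-754, as on every platform this driver targets):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t i2f(uint32_t input)
{
uint32_t result, i, exponent, fraction;

if ((input & 0x3fff) == 0)
result = 0;
else {
exponent = 140;
fraction = (input & 0x3fff) << 10;
for (i = 0; i < 14; i++) {
if (fraction & 0x800000)
break;
fraction <<= 1;
exponent--;
}
result = exponent << 23 | (fraction & 0x7fffff);
}
return result;
}

int main(void)
{
for (uint32_t v = 0; v < 16384; v++) {
float f = (float)v;
uint32_t bits;
memcpy(&bits, &f, sizeof(bits)); /* reinterpret the float's bit pattern */
if (i2f(v) != bits) {
printf("mismatch at %u: 0x%08x vs 0x%08x\n", v, i2f(v), bits);
return 1;
}
}
printf("i2f matches IEEE-754 for 0..16383\n");
return 0;
}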
 
int evergreen_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
616,7 → 611,28
void *ptr;
u32 packet2s[16];
int num_packet2s = 0;
#if 0
rdev->r600_blit.primitives.set_render_target = set_render_target;
rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
rdev->r600_blit.primitives.set_shaders = set_shaders;
rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
rdev->r600_blit.primitives.set_scissors = set_scissors;
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
 
rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
rdev->r600_blit.ring_size_common += 55; /* shaders + def state */
rdev->r600_blit.ring_size_common += 16; /* fence emit for VB IB */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
 
rdev->r600_blit.ring_size_per_loop = 74;
if (rdev->family >= CHIP_CAYMAN)
rdev->r600_blit.ring_size_per_loop += 9; /* additional DWs for surface sync */
 
rdev->r600_blit.max_dim = 16384;
 
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;
710,279 → 726,8
return r;
}
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
 
void evergreen_blit_fini(struct radeon_device *rdev)
{
int r;
#endif
 
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
if (rdev->r600_blit.shader_obj == NULL)
return;
/* If we can't reserve the bo, unref should be enough to destroy
* it when it becomes idle.
*/
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (!r) {
radeon_bo_unpin(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
 
static int evergreen_vb_ib_get(struct radeon_device *rdev)
{
int r;
r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}
 
rdev->r600_blit.vb_total = 64*1024;
rdev->r600_blit.vb_used = 0;
return 0;
}
 
static void evergreen_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}
 
int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
int r;
int ring_size, line_size;
int max_size;
/* loops of emits + fence emit possible */
int dwords_per_loop = 74, num_loops;
 
r = evergreen_vb_ib_get(rdev);
if (r)
return r;
 
/* 8 bpp vs 32 bpp for xfer unit */
if (size_bytes & 3)
line_size = 8192;
else
line_size = 8192 * 4;
 
max_size = 8192 * line_size;
 
/* major loops cover the max size transfer */
num_loops = ((size_bytes + max_size) / max_size);
/* minor loops cover the extra non aligned bits */
num_loops += ((size_bytes % line_size) ? 1 : 0);
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 55; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
 
set_default_state(rdev); /* 36 */
set_shaders(rdev); /* 16 */
return 0;
}
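As a worked example of the ring-size accounting above (a sketch using the same constants): a 1 MiB, 4-byte-aligned copy needs a single major loop and no tail loop, so the reservation passed to radeon_ring_lock() comes out at 154 dwords.
 
static int example_ring_size(void)
{
	int size_bytes = 0x100000;             /* 1 MiB, (size_bytes & 3) == 0  */
	int line_size  = 8192 * 4;             /* 32768: 32bpp transfer unit    */
	int max_size   = 8192 * line_size;     /* one major loop covers 256 MiB */
	int num_loops  = (size_bytes + max_size) / max_size      /* = 1         */
	               + ((size_bytes % line_size) ? 1 : 0);     /* = 0, no tail */
	return num_loops * 74 + 55 + 10 + 5 + 10;                /* = 154 dwords */
}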
 
void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
int r;
 
if (rdev->r600_blit.vb_ib)
evergreen_vb_ib_put(rdev);
 
if (fence)
r = radeon_fence_emit(rdev, fence);
 
radeon_ring_unlock_commit(rdev);
}
 
void evergreen_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes)
{
int max_bytes;
u64 vb_gpu_addr;
u32 *vb;
 
DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
size_bytes, rdev->r600_blit.vb_used);
vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
max_bytes = 8192;
 
while (size_bytes) {
int cur_size = size_bytes;
int src_x = src_gpu_addr & 255;
int dst_x = dst_gpu_addr & 255;
int h = 1;
src_gpu_addr = src_gpu_addr & ~255ULL;
dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
 
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
// WARN_ON(1);
}
 
vb[0] = i2f(dst_x);
vb[1] = 0;
vb[2] = i2f(src_x);
vb[3] = 0;
 
vb[4] = i2f(dst_x);
vb[5] = i2f(h);
vb[6] = i2f(src_x);
vb[7] = i2f(h);
 
vb[8] = i2f(dst_x + cur_size);
vb[9] = i2f(h);
vb[10] = i2f(src_x + cur_size);
vb[11] = i2f(h);
 
/* src 10 */
set_tex_resource(rdev, FMT_8,
src_x + cur_size, h, src_x + cur_size,
src_gpu_addr);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
 
 
/* dst 17 */
set_render_target(rdev, COLOR_8,
dst_x + cur_size, h,
dst_gpu_addr);
 
/* scissors 12 */
set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
 
/* 15 */
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
set_vtx_resource(rdev, vb_gpu_addr);
 
/* draw 10 */
draw_auto(rdev);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
 
vb += 12;
rdev->r600_blit.vb_used += 12 * 4;
 
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
} else {
max_bytes = 8192 * 4;
 
while (size_bytes) {
int cur_size = size_bytes;
int src_x = (src_gpu_addr & 255);
int dst_x = (dst_gpu_addr & 255);
int h = 1;
src_gpu_addr = src_gpu_addr & ~255ULL;
dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
 
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
// WARN_ON(1);
}
 
vb[0] = i2f(dst_x / 4);
vb[1] = 0;
vb[2] = i2f(src_x / 4);
vb[3] = 0;
 
vb[4] = i2f(dst_x / 4);
vb[5] = i2f(h);
vb[6] = i2f(src_x / 4);
vb[7] = i2f(h);
 
vb[8] = i2f((dst_x + cur_size) / 4);
vb[9] = i2f(h);
vb[10] = i2f((src_x + cur_size) / 4);
vb[11] = i2f(h);
 
/* src 10 */
set_tex_resource(rdev, FMT_8_8_8_8,
(src_x + cur_size) / 4,
h, (src_x + cur_size) / 4,
src_gpu_addr);
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
 
/* dst 17 */
set_render_target(rdev, COLOR_8_8_8_8,
(dst_x + cur_size) / 4, h,
dst_gpu_addr);
 
/* scissors 12 */
set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size) / 4, h);
 
/* Vertex buffer setup 15 */
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
set_vtx_resource(rdev, vb_gpu_addr);
 
/* draw 10 */
draw_auto(rdev);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
 
/* 74 ring dwords per loop */
vb += 12;
rdev->r600_blit.vb_used += 12 * 4;
 
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
}
}
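To make the byte-path geometry above concrete: each pass splits the GPU addresses into a 256-byte-aligned base plus an x offset, then clamps the row length to 8192 bytes minus both offsets. A worked first pass with hypothetical addresses (a sketch, not driver code):
 
#include <stdint.h>
 
static int example_first_pass(void)
{
	uint64_t src_gpu_addr = 0x100105;	/* hypothetical, unaligned       */
	uint64_t dst_gpu_addr = 0x200203;
	int src_x = src_gpu_addr & 255;		/* 5                             */
	int dst_x = dst_gpu_addr & 255;		/* 3                             */
	int cur_size = 100000;			/* bytes left to copy            */
 
	if (cur_size > 8192)
		cur_size = 8192;
	if (cur_size > 8192 - dst_x)
		cur_size = 8192 - dst_x;	/* 8189                          */
	if (cur_size > 8192 - src_x)
		cur_size = 8192 - src_x;	/* 8187                          */
	return cur_size;	/* first pass moves 8187 bytes; h stays 1 here */
}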
 
/drivers/video/drm/radeon/evergreen_blit_shaders.c
24,6 → 24,7
* Alex Deucher <alexander.deucher@amd.com>
*/
 
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/kernel.h>
 
/drivers/video/drm/radeon/evergreen_hdmi.c
0,0 → 1,213
/*
* Copyright 2008 Advanced Micro Devices, Inc.
* Copyright 2008 Red Hat Inc.
* Copyright 2009 Christian König.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Christian König
* Rafał Miłecki
*/
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "evergreend.h"
#include "atom.h"
 
/*
* update the N and CTS parameters for a given pixel clock rate
*/
static void evergreen_hdmi_update_ACR(struct drm_encoder *encoder, uint32_t clock)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
 
WREG32(HDMI_ACR_32_0 + offset, HDMI_ACR_CTS_32(acr.cts_32khz));
WREG32(HDMI_ACR_32_1 + offset, acr.n_32khz);
 
WREG32(HDMI_ACR_44_0 + offset, HDMI_ACR_CTS_44(acr.cts_44_1khz));
WREG32(HDMI_ACR_44_1 + offset, acr.n_44_1khz);
 
WREG32(HDMI_ACR_48_0 + offset, HDMI_ACR_CTS_48(acr.cts_48khz));
WREG32(HDMI_ACR_48_1 + offset, acr.n_48khz);
}
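The driver takes N and CTS from precomputed values via r600_hdmi_acr(); the underlying HDMI audio clock regeneration relation is 128 * fs = f_TMDS * N / CTS, i.e. CTS = f_TMDS * N / (128 * fs). A sketch with the spec's recommended N for 48 kHz at a 74.25 MHz pixel clock (exact clocks assumed):
 
static unsigned int example_cts_48khz(void)
{
	unsigned int n     = 6144;		/* recommended N for fs = 48 kHz */
	unsigned int clock = 74250;		/* pixel clock in kHz            */
	return clock * n / (128 * 48);		/* fs in kHz -> CTS = 74250      */
}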
 
/*
* calculate the checksum for a given info frame
*/
static void evergreen_hdmi_infoframe_checksum(uint8_t packetType,
uint8_t versionNumber,
uint8_t length,
uint8_t *frame)
{
int i;
frame[0] = packetType + versionNumber + length;
for (i = 1; i <= length; i++)
frame[0] += frame[i];
frame[0] = 0x100 - frame[0];
}
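The checksum is chosen so that the header bytes (type, version, length), the payload, and the checksum itself sum to zero modulo 256. A hypothetical verifier (sketch), using the same frame layout where frame[0] holds the checksum:
 
static int infoframe_checksum_ok(uint8_t packetType, uint8_t versionNumber,
				 uint8_t length, const uint8_t *frame)
{
	uint8_t sum = packetType + versionNumber + length;
	int i;
 
	for (i = 0; i <= length; i++)	/* includes frame[0], the checksum  */
		sum += frame[i];
	return sum == 0;		/* uint8_t arithmetic wraps mod 256 */
}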
 
/*
* build a HDMI Video Info Frame
*/
static void evergreen_hdmi_videoinfoframe(
struct drm_encoder *encoder,
uint8_t color_format,
int active_information_present,
uint8_t active_format_aspect_ratio,
uint8_t scan_information,
uint8_t colorimetry,
uint8_t ex_colorimetry,
uint8_t quantization,
int ITC,
uint8_t picture_aspect_ratio,
uint8_t video_format_identification,
uint8_t pixel_repetition,
uint8_t non_uniform_picture_scaling,
uint8_t bar_info_data_valid,
uint16_t top_bar,
uint16_t bottom_bar,
uint16_t left_bar,
uint16_t right_bar
)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
 
uint8_t frame[14];
 
frame[0x0] = 0;
frame[0x1] =
(scan_information & 0x3) |
((bar_info_data_valid & 0x3) << 2) |
((active_information_present & 0x1) << 4) |
((color_format & 0x3) << 5);
frame[0x2] =
(active_format_aspect_ratio & 0xF) |
((picture_aspect_ratio & 0x3) << 4) |
((colorimetry & 0x3) << 6);
frame[0x3] =
(non_uniform_picture_scaling & 0x3) |
((quantization & 0x3) << 2) |
((ex_colorimetry & 0x7) << 4) |
((ITC & 0x1) << 7);
frame[0x4] = (video_format_identification & 0x7F);
frame[0x5] = (pixel_repetition & 0xF);
frame[0x6] = (top_bar & 0xFF);
frame[0x7] = (top_bar >> 8);
frame[0x8] = (bottom_bar & 0xFF);
frame[0x9] = (bottom_bar >> 8);
frame[0xA] = (left_bar & 0xFF);
frame[0xB] = (left_bar >> 8);
frame[0xC] = (right_bar & 0xFF);
frame[0xD] = (right_bar >> 8);
 
evergreen_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright; Intel
* uses the same ones. The checksum function also seems to be OK, as it
* works fine for the audio infoframe. However, the calculated value is
* always lower by 2 than the one fglrx programs. That breaks displaying
* anything on TVs that strictly check the checksum. Hack it manually
* here to work around this issue. */
frame[0x0] += 2;
 
WREG32(AFMT_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(AFMT_AVI_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
WREG32(AFMT_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(AFMT_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8));
}
 
/*
* update the info frames with the data from the current display mode
*/
void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
 
/* Silently exit; r600_hdmi_enable will raise the WARN for us */
if (!dig->afmt->enabled)
return;
offset = dig->afmt->offset;
 
// r600_audio_set_clock(encoder, mode->clock);
 
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND); /* send null packets when required */
 
WREG32(AFMT_AUDIO_CRC_CONTROL + offset, 0x1000);
 
WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
 
WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
 
WREG32(HDMI_ACR_PACKET_CONTROL + offset,
HDMI_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
HDMI_ACR_SOURCE); /* select SW CTS value */
 
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
HDMI_NULL_SEND | /* send null packets when required */
HDMI_GC_SEND | /* send general control packets */
HDMI_GC_CONT); /* send general control packets every frame */
 
WREG32(HDMI_INFOFRAME_CONTROL0 + offset,
HDMI_AVI_INFO_SEND | /* enable AVI info frames */
HDMI_AVI_INFO_CONT | /* send AVI info frames every frame/field */
HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
 
WREG32(AFMT_INFOFRAME_CONTROL0 + offset,
AFMT_AUDIO_INFO_UPDATE); /* required for audio info values to be updated */
 
WREG32(HDMI_INFOFRAME_CONTROL1 + offset,
HDMI_AVI_INFO_LINE(2) | /* anything other than 0 */
HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
 
WREG32(HDMI_GC + offset, 0); /* unset HDMI_GC_AVMUTE */
 
evergreen_hdmi_videoinfoframe(encoder, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0);
 
evergreen_hdmi_update_ACR(encoder, mode->clock);
 
/* it's unknown what these bits do exactly, but they're quite useful for debugging */
WREG32(AFMT_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
}
/drivers/video/drm/radeon/evergreen_reg.h
35,6 → 35,14
#define EVERGREEN_P1PLL_SS_CNTL 0x414
#define EVERGREEN_P2PLL_SS_CNTL 0x454
# define EVERGREEN_PxPLL_SS_EN (1 << 12)
 
#define EVERGREEN_AUDIO_PLL1_MUL 0x5b0
#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
 
#define EVERGREEN_AUDIO_ENABLE 0x5e78
#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
 
/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
#define EVERGREEN_GRPH_ENABLE 0x6800
#define EVERGREEN_GRPH_CONTROL 0x6804
42,6 → 50,17
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
# define EVERGREEN_ADDR_SURF_2_BANK 0
# define EVERGREEN_ADDR_SURF_4_BANK 1
# define EVERGREEN_ADDR_SURF_8_BANK 2
# define EVERGREEN_ADDR_SURF_16_BANK 3
# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
61,6 → 80,24
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
181,7 → 218,10
#define EVERGREEN_CRTC_CONTROL 0x6e70
# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
# define EVERGREEN_CRTC_DISP_READ_REQUEST_DISABLE (1 << 24)
#define EVERGREEN_CRTC_BLANK_CONTROL 0x6e74
# define EVERGREEN_CRTC_BLANK_DATA_EN (1 << 8)
#define EVERGREEN_CRTC_STATUS 0x6e8c
# define EVERGREEN_CRTC_V_BLANK (1 << 0)
#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
191,4 → 231,7
#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
 
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
 
#endif
/drivers/video/drm/radeon/evergreend.h
37,6 → 37,15
#define EVERGREEN_MAX_PIPES_MASK 0xFF
#define EVERGREEN_MAX_LDS_NUM 0xFFFF
 
#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
 
/* Registers */
 
#define RCU_IND_INDEX 0x100
54,6 → 63,7
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
#define NUM_PIPES_MASK 0x0000000f
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
#define NUM_SHADER_ENGINES(x) ((x) << 12)
77,6 → 87,15
 
#define CONFIG_MEMSIZE 0x5428
 
#define BIF_FB_EN 0x5490
#define FB_READ_EN (1 << 0)
#define FB_WRITE_EN (1 << 1)
 
#define CP_COHER_BASE 0x85F8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
#define CP_BUSY_STAT 0x867C
#define CP_STAT 0x8680
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
108,9 → 127,230
#define CP_RB_WPTR_ADDR_HI 0xC11C
#define CP_RB_WPTR_DELAY 0x8704
#define CP_SEM_WAIT_TIMER 0x85BC
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_DEBUG 0xC1FC
 
/* Audio clocks */
#define DCCG_AUDIO_DTO_SOURCE 0x05ac
# define DCCG_AUDIO_DTO0_SOURCE_SEL(x) ((x) << 0) /* crtc0 - crtc5 */
# define DCCG_AUDIO_DTO_SEL (1 << 4) /* 0=dto0 1=dto1 */
 
#define DCCG_AUDIO_DTO0_PHASE 0x05b0
#define DCCG_AUDIO_DTO0_MODULE 0x05b4
#define DCCG_AUDIO_DTO0_LOAD 0x05b8
#define DCCG_AUDIO_DTO0_CNTL 0x05bc
 
#define DCCG_AUDIO_DTO1_PHASE 0x05c0
#define DCCG_AUDIO_DTO1_MODULE 0x05c4
#define DCCG_AUDIO_DTO1_LOAD 0x05c8
#define DCCG_AUDIO_DTO1_CNTL 0x05cc
 
/* DCE 4.0 AFMT */
#define HDMI_CONTROL 0x7030
# define HDMI_KEEPOUT_MODE (1 << 0)
# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
# define HDMI_ERROR_ACK (1 << 8)
# define HDMI_ERROR_MASK (1 << 9)
# define HDMI_DEEP_COLOR_ENABLE (1 << 24)
# define HDMI_DEEP_COLOR_DEPTH(x) (((x) & 3) << 28)
# define HDMI_24BIT_DEEP_COLOR 0
# define HDMI_30BIT_DEEP_COLOR 1
# define HDMI_36BIT_DEEP_COLOR 2
#define HDMI_STATUS 0x7034
# define HDMI_ACTIVE_AVMUTE (1 << 0)
# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
# define HDMI_VBI_PACKET_ERROR (1 << 20)
#define HDMI_AUDIO_PACKET_CONTROL 0x7038
# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
#define HDMI_ACR_PACKET_CONTROL 0x703c
# define HDMI_ACR_SEND (1 << 0)
# define HDMI_ACR_CONT (1 << 1)
# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
# define HDMI_ACR_HW 0
# define HDMI_ACR_32 1
# define HDMI_ACR_44 2
# define HDMI_ACR_48 3
# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI_ACR_AUTO_SEND (1 << 12)
# define HDMI_ACR_N_MULTIPLE(x) (((x) & 7) << 16)
# define HDMI_ACR_X1 1
# define HDMI_ACR_X2 2
# define HDMI_ACR_X4 4
# define HDMI_ACR_AUDIO_PRIORITY (1 << 31)
#define HDMI_VBI_PACKET_CONTROL 0x7040
# define HDMI_NULL_SEND (1 << 0)
# define HDMI_GC_SEND (1 << 4)
# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
#define HDMI_INFOFRAME_CONTROL0 0x7044
# define HDMI_AVI_INFO_SEND (1 << 0)
# define HDMI_AVI_INFO_CONT (1 << 1)
# define HDMI_AUDIO_INFO_SEND (1 << 4)
# define HDMI_AUDIO_INFO_CONT (1 << 5)
# define HDMI_MPEG_INFO_SEND (1 << 8)
# define HDMI_MPEG_INFO_CONT (1 << 9)
#define HDMI_INFOFRAME_CONTROL1 0x7048
# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI_GENERIC_PACKET_CONTROL 0x704c
# define HDMI_GENERIC0_SEND (1 << 0)
# define HDMI_GENERIC0_CONT (1 << 1)
# define HDMI_GENERIC1_SEND (1 << 4)
# define HDMI_GENERIC1_CONT (1 << 5)
# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
#define HDMI_GC 0x7058
# define HDMI_GC_AVMUTE (1 << 0)
# define HDMI_GC_AVMUTE_CONT (1 << 2)
#define AFMT_AUDIO_PACKET_CONTROL2 0x705c
# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
# define AFMT_60958_CS_SOURCE (1 << 4)
# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
#define AFMT_AVI_INFO0 0x7084
# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
# define AFMT_AVI_INFO_Y_RGB 0
# define AFMT_AVI_INFO_Y_YCBCR422 1
# define AFMT_AVI_INFO_Y_YCBCR444 2
# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
#define AFMT_AVI_INFO1 0x7088
# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
# define AFMT_AVI_INFO_CN(x) (((x) & 0x3) << 12)
# define AFMT_AVI_INFO_YQ(x) (((x) & 0x3) << 14)
# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
#define AFMT_AVI_INFO2 0x708c
# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
#define AFMT_AVI_INFO3 0x7090
# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
#define AFMT_MPEG_INFO0 0x7094
# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
#define AFMT_MPEG_INFO1 0x7098
# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
#define AFMT_GENERIC0_HDR 0x709c
#define AFMT_GENERIC0_0 0x70a0
#define AFMT_GENERIC0_1 0x70a4
#define AFMT_GENERIC0_2 0x70a8
#define AFMT_GENERIC0_3 0x70ac
#define AFMT_GENERIC0_4 0x70b0
#define AFMT_GENERIC0_5 0x70b4
#define AFMT_GENERIC0_6 0x70b8
#define AFMT_GENERIC1_HDR 0x70bc
#define AFMT_GENERIC1_0 0x70c0
#define AFMT_GENERIC1_1 0x70c4
#define AFMT_GENERIC1_2 0x70c8
#define AFMT_GENERIC1_3 0x70cc
#define AFMT_GENERIC1_4 0x70d0
#define AFMT_GENERIC1_5 0x70d4
#define AFMT_GENERIC1_6 0x70d8
#define HDMI_ACR_32_0 0x70dc
# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_32_1 0x70e0
# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_44_0 0x70e4
# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_44_1 0x70e8
# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_48_0 0x70ec
# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_48_1 0x70f0
# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_STATUS_0 0x70f4
#define HDMI_ACR_STATUS_1 0x70f8
#define AFMT_AUDIO_INFO0 0x70fc
# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
# define AFMT_AUDIO_INFO_CT(x) (((x) & 0xf) << 11)
# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
# define AFMT_AUDIO_INFO_CXT(x) (((x) & 0x1f) << 24)
#define AFMT_AUDIO_INFO1 0x7100
# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
# define AFMT_AUDIO_INFO_LFEBPL(x) (((x) & 3) << 16)
#define AFMT_60958_0 0x7104
# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
#define AFMT_60958_1 0x7108
# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
#define AFMT_AUDIO_CRC_CONTROL 0x710c
# define AFMT_AUDIO_CRC_EN (1 << 0)
#define AFMT_RAMP_CONTROL0 0x7110
# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
# define AFMT_RAMP_DATA_SIGN (1 << 31)
#define AFMT_RAMP_CONTROL1 0x7114
# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
#define AFMT_RAMP_CONTROL2 0x7118
# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
#define AFMT_RAMP_CONTROL3 0x711c
# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
#define AFMT_60958_2 0x7120
# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
#define AFMT_STATUS 0x7128
# define AFMT_AUDIO_ENABLE (1 << 4)
# define AFMT_AUDIO_HBR_ENABLE (1 << 8)
# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
#define AFMT_AUDIO_PACKET_CONTROL 0x712c
# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
# define AFMT_RESET_FIFO_WHEN_AUDIO_DIS (1 << 11) /* set to 1 */
# define AFMT_AUDIO_TEST_EN (1 << 12)
# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
# define AFMT_60958_CS_UPDATE (1 << 26)
# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
#define AFMT_VBI_PACKET_CONTROL 0x7130
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x7134
# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - afmt regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7138
 
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
194,6 → 434,9
#define NOOFCHAN_MASK 0x00003000
#define MC_SHARED_CHREMAP 0x2008
 
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
#define BLACKOUT_MODE_MASK 0x00000007
 
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
230,6 → 473,7
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define MC_VM_MD_L1_TLB3_CNTL 0x2698
 
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
242,6 → 486,7
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
#define PA_SC_ENHANCE 0x8BF0
#define PA_SC_AA_CONFIG 0x28C04
#define MSAA_NUM_SAMPLES_SHIFT 0
#define MSAA_NUM_SAMPLES_MASK 0x3
269,6 → 514,7
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
 
#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define NUMBER_OF_SETS(x) ((x) << 1)
319,6 → 565,8
#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
#define NUM_HS_GPRS(x) ((x) << 0)
#define NUM_LS_GPRS(x) ((x) << 16)
#define SQ_GLOBAL_GPR_RESOURCE_MGMT_1 0x8C10
#define SQ_GLOBAL_GPR_RESOURCE_MGMT_2 0x8C14
#define SQ_THREAD_RESOURCE_MGMT 0x8C18
#define NUM_PS_THREADS(x) ((x) << 0)
#define NUM_VS_THREADS(x) ((x) << 8)
337,6 → 585,10
#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
#define SQ_DYN_GPR_SIMD_LOCK_EN 0x8D94
#define SQ_STATIC_THREAD_MGMT_1 0x8E20
#define SQ_STATIC_THREAD_MGMT_2 0x8E24
#define SQ_STATIC_THREAD_MGMT_3 0x8E28
#define SQ_LDS_RESOURCE_MGMT 0x8E2C
 
#define SQ_MS_FIFO_SIZES 0x8CF0
691,6 → 943,7
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
768,6 → 1021,8
#define SQ_TEX_VTX_VALID_TEXTURE 0x2
#define SQ_TEX_VTX_VALID_BUFFER 0x3
 
#define VGT_VTX_VECT_EJECT_REG 0x88b0
 
#define SQ_CONST_MEM_BASE 0x8df8
 
#define SQ_ESGS_RING_BASE 0x8c40
892,19 → 1147,162
#define PA_SC_SCREEN_SCISSOR_TL 0x28030
#define PA_SC_GENERIC_SCISSOR_TL 0x28240
#define PA_SC_WINDOW_SCISSOR_TL 0x28204
 
#define VGT_PRIMITIVE_TYPE 0x8958
#define VGT_INDEX_TYPE 0x895C
 
#define VGT_NUM_INDICES 0x8970
 
#define VGT_COMPUTE_DIM_X 0x8990
#define VGT_COMPUTE_DIM_Y 0x8994
#define VGT_COMPUTE_DIM_Z 0x8998
#define VGT_COMPUTE_START_X 0x899C
#define VGT_COMPUTE_START_Y 0x89A0
#define VGT_COMPUTE_START_Z 0x89A4
#define VGT_COMPUTE_INDEX 0x89A8
#define VGT_COMPUTE_THREAD_GROUP_SIZE 0x89AC
#define VGT_HS_OFFCHIP_PARAM 0x89B0
 
#define DB_DEBUG 0x9830
#define DB_DEBUG2 0x9834
#define DB_DEBUG3 0x9838
#define DB_DEBUG4 0x983C
#define DB_WATERMARKS 0x9854
#define DB_DEPTH_CONTROL 0x28800
#define R_028800_DB_DEPTH_CONTROL 0x028800
#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
#define C_028800_Z_ENABLE 0xFFFFFFFD
#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
#define C_028800_ZFUNC 0xFFFFFF8F
#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
#define C_028800_STENCILFUNC 0xFFFFF8FF
#define V_028800_STENCILFUNC_NEVER 0x00000000
#define V_028800_STENCILFUNC_LESS 0x00000001
#define V_028800_STENCILFUNC_EQUAL 0x00000002
#define V_028800_STENCILFUNC_LEQUAL 0x00000003
#define V_028800_STENCILFUNC_GREATER 0x00000004
#define V_028800_STENCILFUNC_NOTEQUAL 0x00000005
#define V_028800_STENCILFUNC_GEQUAL 0x00000006
#define V_028800_STENCILFUNC_ALWAYS 0x00000007
#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
#define C_028800_STENCILFAIL 0xFFFFC7FF
#define V_028800_STENCIL_KEEP 0x00000000
#define V_028800_STENCIL_ZERO 0x00000001
#define V_028800_STENCIL_REPLACE 0x00000002
#define V_028800_STENCIL_INCR 0x00000003
#define V_028800_STENCIL_DECR 0x00000004
#define V_028800_STENCIL_INVERT 0x00000005
#define V_028800_STENCIL_INCR_WRAP 0x00000006
#define V_028800_STENCIL_DECR_WRAP 0x00000007
#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
#define C_028800_STENCILZPASS 0xFFFE3FFF
#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
#define C_028800_STENCILZFAIL 0xFFF1FFFF
#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
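The S_/G_/C_ triples above follow the usual r600-family register idiom: S_ shifts a value into its field, G_ extracts it, and C_ is the AND mask that clears it. A read-modify-write of the STENCILFUNC field, as a sketch (RREG32/WREG32 as used elsewhere in this driver, which expand against the local rdev):
 
static void example_set_stencilfunc(struct radeon_device *rdev)
{
	u32 tmp = RREG32(DB_DEPTH_CONTROL);
 
	tmp &= C_028800_STENCILFUNC;		/* C_ clears the field       */
	tmp |= S_028800_STENCILFUNC(V_028800_STENCILFUNC_ALWAYS);	/* S_ sets */
	WREG32(DB_DEPTH_CONTROL, tmp);
}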
#define DB_DEPTH_VIEW 0x28008
#define R_028008_DB_DEPTH_VIEW 0x00028008
#define S_028008_SLICE_START(x) (((x) & 0x7FF) << 0)
#define G_028008_SLICE_START(x) (((x) >> 0) & 0x7FF)
#define C_028008_SLICE_START 0xFFFFF800
#define S_028008_SLICE_MAX(x) (((x) & 0x7FF) << 13)
#define G_028008_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
#define C_028008_SLICE_MAX 0xFF001FFF
#define DB_HTILE_DATA_BASE 0x28014
#define DB_HTILE_SURFACE 0x28abc
#define S_028ABC_HTILE_WIDTH(x) (((x) & 0x1) << 0)
#define G_028ABC_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
#define C_028ABC_HTILE_WIDTH 0xFFFFFFFE
#define S_028ABC_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
#define G_028ABC_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
#define C_028ABC_HTILE_HEIGHT 0xFFFFFFFD
#define G_028ABC_LINEAR(x) (((x) >> 2) & 0x1)
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
# define DB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
#define R_028040_DB_Z_INFO 0x028040
#define S_028040_FORMAT(x) (((x) & 0x3) << 0)
#define G_028040_FORMAT(x) (((x) >> 0) & 0x3)
#define C_028040_FORMAT 0xFFFFFFFC
#define V_028040_Z_INVALID 0x00000000
#define V_028040_Z_16 0x00000001
#define V_028040_Z_24 0x00000002
#define V_028040_Z_32_FLOAT 0x00000003
#define S_028040_ARRAY_MODE(x) (((x) & 0xF) << 4)
#define G_028040_ARRAY_MODE(x) (((x) >> 4) & 0xF)
#define C_028040_ARRAY_MODE 0xFFFFFF0F
#define S_028040_READ_SIZE(x) (((x) & 0x1) << 28)
#define G_028040_READ_SIZE(x) (((x) >> 28) & 0x1)
#define C_028040_READ_SIZE 0xEFFFFFFF
#define S_028040_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 29)
#define G_028040_TILE_SURFACE_ENABLE(x) (((x) >> 29) & 0x1)
#define C_028040_TILE_SURFACE_ENABLE 0xDFFFFFFF
#define S_028040_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
#define G_028040_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
#define C_028040_ZRANGE_PRECISION 0x7FFFFFFF
#define S_028040_TILE_SPLIT(x) (((x) & 0x7) << 8)
#define G_028040_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define S_028040_NUM_BANKS(x) (((x) & 0x3) << 12)
#define G_028040_NUM_BANKS(x) (((x) >> 12) & 0x3)
#define S_028040_BANK_WIDTH(x) (((x) & 0x3) << 16)
#define G_028040_BANK_WIDTH(x) (((x) >> 16) & 0x3)
#define S_028040_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define G_028040_BANK_HEIGHT(x) (((x) >> 20) & 0x3)
#define S_028040_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 24)
#define G_028040_MACRO_TILE_ASPECT(x) (((x) >> 24) & 0x3)
#define DB_STENCIL_INFO 0x28044
#define R_028044_DB_STENCIL_INFO 0x028044
#define S_028044_FORMAT(x) (((x) & 0x1) << 0)
#define G_028044_FORMAT(x) (((x) >> 0) & 0x1)
#define C_028044_FORMAT 0xFFFFFFFE
#define V_028044_STENCIL_INVALID 0
#define V_028044_STENCIL_8 1
#define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7)
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
#define DB_Z_WRITE_BASE 0x28050
#define DB_STENCIL_WRITE_BASE 0x28054
#define DB_DEPTH_SIZE 0x28058
#define R_028058_DB_DEPTH_SIZE 0x028058
#define S_028058_PITCH_TILE_MAX(x) (((x) & 0x7FF) << 0)
#define G_028058_PITCH_TILE_MAX(x) (((x) >> 0) & 0x7FF)
#define C_028058_PITCH_TILE_MAX 0xFFFFF800
#define S_028058_HEIGHT_TILE_MAX(x) (((x) & 0x7FF) << 11)
#define G_028058_HEIGHT_TILE_MAX(x) (((x) >> 11) & 0x7FF)
#define C_028058_HEIGHT_TILE_MAX 0xFFC007FF
#define R_02805C_DB_DEPTH_SLICE 0x02805C
#define S_02805C_SLICE_TILE_MAX(x) (((x) & 0x3FFFFF) << 0)
#define G_02805C_SLICE_TILE_MAX(x) (((x) >> 0) & 0x3FFFFF)
#define C_02805C_SLICE_TILE_MAX 0xFFC00000
 
#define SQ_PGM_START_PS 0x28840
#define SQ_PGM_START_VS 0x2885c
914,6 → 1312,14
#define SQ_PGM_START_HS 0x288b8
#define SQ_PGM_START_LS 0x288d0
 
#define VGT_STRMOUT_BUFFER_BASE_0 0x28AD8
#define VGT_STRMOUT_BUFFER_BASE_1 0x28AE8
#define VGT_STRMOUT_BUFFER_BASE_2 0x28AF8
#define VGT_STRMOUT_BUFFER_BASE_3 0x28B08
#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
#define VGT_STRMOUT_CONFIG 0x28b94
#define VGT_STRMOUT_BUFFER_CONFIG 0x28b98
 
940,13 → 1346,163
#define CB_COLOR0_PITCH 0x28c64
#define CB_COLOR0_SLICE 0x28c68
#define CB_COLOR0_VIEW 0x28c6c
#define R_028C6C_CB_COLOR0_VIEW 0x00028C6C
#define S_028C6C_SLICE_START(x) (((x) & 0x7FF) << 0)
#define G_028C6C_SLICE_START(x) (((x) >> 0) & 0x7FF)
#define C_028C6C_SLICE_START 0xFFFFF800
#define S_028C6C_SLICE_MAX(x) (((x) & 0x7FF) << 13)
#define G_028C6C_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
#define C_028C6C_SLICE_MAX 0xFF001FFF
#define R_028C70_CB_COLOR0_INFO 0x028C70
#define S_028C70_ENDIAN(x) (((x) & 0x3) << 0)
#define G_028C70_ENDIAN(x) (((x) >> 0) & 0x3)
#define C_028C70_ENDIAN 0xFFFFFFFC
#define S_028C70_FORMAT(x) (((x) & 0x3F) << 2)
#define G_028C70_FORMAT(x) (((x) >> 2) & 0x3F)
#define C_028C70_FORMAT 0xFFFFFF03
#define V_028C70_COLOR_INVALID 0x00000000
#define V_028C70_COLOR_8 0x00000001
#define V_028C70_COLOR_4_4 0x00000002
#define V_028C70_COLOR_3_3_2 0x00000003
#define V_028C70_COLOR_16 0x00000005
#define V_028C70_COLOR_16_FLOAT 0x00000006
#define V_028C70_COLOR_8_8 0x00000007
#define V_028C70_COLOR_5_6_5 0x00000008
#define V_028C70_COLOR_6_5_5 0x00000009
#define V_028C70_COLOR_1_5_5_5 0x0000000A
#define V_028C70_COLOR_4_4_4_4 0x0000000B
#define V_028C70_COLOR_5_5_5_1 0x0000000C
#define V_028C70_COLOR_32 0x0000000D
#define V_028C70_COLOR_32_FLOAT 0x0000000E
#define V_028C70_COLOR_16_16 0x0000000F
#define V_028C70_COLOR_16_16_FLOAT 0x00000010
#define V_028C70_COLOR_8_24 0x00000011
#define V_028C70_COLOR_8_24_FLOAT 0x00000012
#define V_028C70_COLOR_24_8 0x00000013
#define V_028C70_COLOR_24_8_FLOAT 0x00000014
#define V_028C70_COLOR_10_11_11 0x00000015
#define V_028C70_COLOR_10_11_11_FLOAT 0x00000016
#define V_028C70_COLOR_11_11_10 0x00000017
#define V_028C70_COLOR_11_11_10_FLOAT 0x00000018
#define V_028C70_COLOR_2_10_10_10 0x00000019
#define V_028C70_COLOR_8_8_8_8 0x0000001A
#define V_028C70_COLOR_10_10_10_2 0x0000001B
#define V_028C70_COLOR_X24_8_32_FLOAT 0x0000001C
#define V_028C70_COLOR_32_32 0x0000001D
#define V_028C70_COLOR_32_32_FLOAT 0x0000001E
#define V_028C70_COLOR_16_16_16_16 0x0000001F
#define V_028C70_COLOR_16_16_16_16_FLOAT 0x00000020
#define V_028C70_COLOR_32_32_32_32 0x00000022
#define V_028C70_COLOR_32_32_32_32_FLOAT 0x00000023
#define V_028C70_COLOR_32_32_32_FLOAT 0x00000030
#define S_028C70_ARRAY_MODE(x) (((x) & 0xF) << 8)
#define G_028C70_ARRAY_MODE(x) (((x) >> 8) & 0xF)
#define C_028C70_ARRAY_MODE 0xFFFFF0FF
#define V_028C70_ARRAY_LINEAR_GENERAL 0x00000000
#define V_028C70_ARRAY_LINEAR_ALIGNED 0x00000001
#define V_028C70_ARRAY_1D_TILED_THIN1 0x00000002
#define V_028C70_ARRAY_2D_TILED_THIN1 0x00000004
#define S_028C70_NUMBER_TYPE(x) (((x) & 0x7) << 12)
#define G_028C70_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
#define C_028C70_NUMBER_TYPE 0xFFFF8FFF
#define V_028C70_NUMBER_UNORM 0x00000000
#define V_028C70_NUMBER_SNORM 0x00000001
#define V_028C70_NUMBER_USCALED 0x00000002
#define V_028C70_NUMBER_SSCALED 0x00000003
#define V_028C70_NUMBER_UINT 0x00000004
#define V_028C70_NUMBER_SINT 0x00000005
#define V_028C70_NUMBER_SRGB 0x00000006
#define V_028C70_NUMBER_FLOAT 0x00000007
#define S_028C70_COMP_SWAP(x) (((x) & 0x3) << 15)
#define G_028C70_COMP_SWAP(x) (((x) >> 15) & 0x3)
#define C_028C70_COMP_SWAP 0xFFFE7FFF
#define V_028C70_SWAP_STD 0x00000000
#define V_028C70_SWAP_ALT 0x00000001
#define V_028C70_SWAP_STD_REV 0x00000002
#define V_028C70_SWAP_ALT_REV 0x00000003
#define S_028C70_FAST_CLEAR(x) (((x) & 0x1) << 17)
#define G_028C70_FAST_CLEAR(x) (((x) >> 17) & 0x1)
#define C_028C70_FAST_CLEAR 0xFFFDFFFF
#define S_028C70_COMPRESSION(x) (((x) & 0x3) << 18)
#define G_028C70_COMPRESSION(x) (((x) >> 18) & 0x3)
#define C_028C70_COMPRESSION 0xFFF3FFFF
#define S_028C70_BLEND_CLAMP(x) (((x) & 0x1) << 19)
#define G_028C70_BLEND_CLAMP(x) (((x) >> 19) & 0x1)
#define C_028C70_BLEND_CLAMP 0xFFF7FFFF
#define S_028C70_BLEND_BYPASS(x) (((x) & 0x1) << 20)
#define G_028C70_BLEND_BYPASS(x) (((x) >> 20) & 0x1)
#define C_028C70_BLEND_BYPASS 0xFFEFFFFF
#define S_028C70_SIMPLE_FLOAT(x) (((x) & 0x1) << 21)
#define G_028C70_SIMPLE_FLOAT(x) (((x) >> 21) & 0x1)
#define C_028C70_SIMPLE_FLOAT 0xFFDFFFFF
#define S_028C70_ROUND_MODE(x) (((x) & 0x1) << 22)
#define G_028C70_ROUND_MODE(x) (((x) >> 22) & 0x1)
#define C_028C70_ROUND_MODE 0xFFBFFFFF
#define S_028C70_TILE_COMPACT(x) (((x) & 0x1) << 23)
#define G_028C70_TILE_COMPACT(x) (((x) >> 23) & 0x1)
#define C_028C70_TILE_COMPACT 0xFF7FFFFF
#define S_028C70_SOURCE_FORMAT(x) (((x) & 0x3) << 24)
#define G_028C70_SOURCE_FORMAT(x) (((x) >> 24) & 0x3)
#define C_028C70_SOURCE_FORMAT 0xFCFFFFFF
#define V_028C70_EXPORT_4C_32BPC 0x0
#define V_028C70_EXPORT_4C_16BPC 0x1
#define V_028C70_EXPORT_2C_32BPC 0x2 /* Do not use */
#define S_028C70_RAT(x) (((x) & 0x1) << 26)
#define G_028C70_RAT(x) (((x) >> 26) & 0x1)
#define C_028C70_RAT 0xFBFFFFFF
#define S_028C70_RESOURCE_TYPE(x) (((x) & 0x7) << 27)
#define G_028C70_RESOURCE_TYPE(x) (((x) >> 27) & 0x7)
#define C_028C70_RESOURCE_TYPE 0xC7FFFFFF
 
#define CB_COLOR0_INFO 0x28c70
# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
# define ARRAY_LINEAR_GENERAL 0
# define ARRAY_LINEAR_ALIGNED 1
# define ARRAY_1D_TILED_THIN1 2
# define ARRAY_2D_TILED_THIN1 4
# define CB_SOURCE_FORMAT(x) ((x) << 24)
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define R_028C74_CB_COLOR0_ATTRIB 0x028C74
#define S_028C74_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 4)
#define G_028C74_NON_DISP_TILING_ORDER(x) (((x) >> 4) & 0x1)
#define C_028C74_NON_DISP_TILING_ORDER 0xFFFFFFEF
#define S_028C74_TILE_SPLIT(x) (((x) & 0xf) << 5)
#define G_028C74_TILE_SPLIT(x) (((x) >> 5) & 0xf)
#define S_028C74_NUM_BANKS(x) (((x) & 0x3) << 10)
#define G_028C74_NUM_BANKS(x) (((x) >> 10) & 0x3)
#define S_028C74_BANK_WIDTH(x) (((x) & 0x3) << 13)
#define G_028C74_BANK_WIDTH(x) (((x) >> 13) & 0x3)
#define S_028C74_BANK_HEIGHT(x) (((x) & 0x3) << 16)
#define G_028C74_BANK_HEIGHT(x) (((x) >> 16) & 0x3)
#define S_028C74_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
#define G_028C74_MACRO_TILE_ASPECT(x) (((x) >> 19) & 0x3)
#define CB_COLOR0_ATTRIB 0x28c74
# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
# define ADDR_SURF_TILE_SPLIT_64B 0
# define ADDR_SURF_TILE_SPLIT_128B 1
# define ADDR_SURF_TILE_SPLIT_256B 2
# define ADDR_SURF_TILE_SPLIT_512B 3
# define ADDR_SURF_TILE_SPLIT_1KB 4
# define ADDR_SURF_TILE_SPLIT_2KB 5
# define ADDR_SURF_TILE_SPLIT_4KB 6
# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
# define ADDR_SURF_2_BANK 0
# define ADDR_SURF_4_BANK 1
# define ADDR_SURF_8_BANK 2
# define ADDR_SURF_16_BANK 3
# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
# define ADDR_SURF_BANK_WIDTH_1 0
# define ADDR_SURF_BANK_WIDTH_2 1
# define ADDR_SURF_BANK_WIDTH_4 2
# define ADDR_SURF_BANK_WIDTH_8 3
# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
# define ADDR_SURF_BANK_HEIGHT_1 0
# define ADDR_SURF_BANK_HEIGHT_2 1
# define ADDR_SURF_BANK_HEIGHT_4 2
# define ADDR_SURF_BANK_HEIGHT_8 3
# define CB_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 19)
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
1107,17 → 1663,226
#define CB_COLOR7_CLEAR_WORD3 0x28e3c
 
#define SQ_TEX_RESOURCE_WORD0_0 0x30000
# define TEX_DIM(x) ((x) << 0)
# define SQ_TEX_DIM_1D 0
# define SQ_TEX_DIM_2D 1
# define SQ_TEX_DIM_3D 2
# define SQ_TEX_DIM_CUBEMAP 3
# define SQ_TEX_DIM_1D_ARRAY 4
# define SQ_TEX_DIM_2D_ARRAY 5
# define SQ_TEX_DIM_2D_MSAA 6
# define SQ_TEX_DIM_2D_ARRAY_MSAA 7
#define SQ_TEX_RESOURCE_WORD1_0 0x30004
# define TEX_ARRAY_MODE(x) ((x) << 28)
#define SQ_TEX_RESOURCE_WORD2_0 0x30008
#define SQ_TEX_RESOURCE_WORD3_0 0x3000C
#define SQ_TEX_RESOURCE_WORD4_0 0x30010
# define TEX_DST_SEL_X(x) ((x) << 16)
# define TEX_DST_SEL_Y(x) ((x) << 19)
# define TEX_DST_SEL_Z(x) ((x) << 22)
# define TEX_DST_SEL_W(x) ((x) << 25)
# define SQ_SEL_X 0
# define SQ_SEL_Y 1
# define SQ_SEL_Z 2
# define SQ_SEL_W 3
# define SQ_SEL_0 4
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
# define MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define R_030000_SQ_TEX_RESOURCE_WORD0_0 0x030000
#define S_030000_DIM(x) (((x) & 0x7) << 0)
#define G_030000_DIM(x) (((x) >> 0) & 0x7)
#define C_030000_DIM 0xFFFFFFF8
#define V_030000_SQ_TEX_DIM_1D 0x00000000
#define V_030000_SQ_TEX_DIM_2D 0x00000001
#define V_030000_SQ_TEX_DIM_3D 0x00000002
#define V_030000_SQ_TEX_DIM_CUBEMAP 0x00000003
#define V_030000_SQ_TEX_DIM_1D_ARRAY 0x00000004
#define V_030000_SQ_TEX_DIM_2D_ARRAY 0x00000005
#define V_030000_SQ_TEX_DIM_2D_MSAA 0x00000006
#define V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
#define S_030000_NON_DISP_TILING_ORDER(x) (((x) & 0x1) << 5)
#define G_030000_NON_DISP_TILING_ORDER(x) (((x) >> 5) & 0x1)
#define C_030000_NON_DISP_TILING_ORDER 0xFFFFFFDF
#define S_030000_PITCH(x) (((x) & 0xFFF) << 6)
#define G_030000_PITCH(x) (((x) >> 6) & 0xFFF)
#define C_030000_PITCH 0xFFFC003F
#define S_030000_TEX_WIDTH(x) (((x) & 0x3FFF) << 18)
#define G_030000_TEX_WIDTH(x) (((x) >> 18) & 0x3FFF)
#define C_030000_TEX_WIDTH 0x0003FFFF
#define R_030004_SQ_TEX_RESOURCE_WORD1_0 0x030004
#define S_030004_TEX_HEIGHT(x) (((x) & 0x3FFF) << 0)
#define G_030004_TEX_HEIGHT(x) (((x) >> 0) & 0x3FFF)
#define C_030004_TEX_HEIGHT 0xFFFFC000
#define S_030004_TEX_DEPTH(x) (((x) & 0x1FFF) << 14)
#define G_030004_TEX_DEPTH(x) (((x) >> 14) & 0x1FFF)
#define C_030004_TEX_DEPTH 0xF8003FFF
#define S_030004_ARRAY_MODE(x) (((x) & 0xF) << 28)
#define G_030004_ARRAY_MODE(x) (((x) >> 28) & 0xF)
#define C_030004_ARRAY_MODE 0x0FFFFFFF
#define R_030008_SQ_TEX_RESOURCE_WORD2_0 0x030008
#define S_030008_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
#define G_030008_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_030008_BASE_ADDRESS 0x00000000
#define R_03000C_SQ_TEX_RESOURCE_WORD3_0 0x03000C
#define S_03000C_MIP_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
#define G_03000C_MIP_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
#define C_03000C_MIP_ADDRESS 0x00000000
#define R_030010_SQ_TEX_RESOURCE_WORD4_0 0x030010
#define S_030010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_030010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
#define C_030010_FORMAT_COMP_X 0xFFFFFFFC
#define V_030010_SQ_FORMAT_COMP_UNSIGNED 0x00000000
#define V_030010_SQ_FORMAT_COMP_SIGNED 0x00000001
#define V_030010_SQ_FORMAT_COMP_UNSIGNED_BIASED 0x00000002
#define S_030010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
#define G_030010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
#define C_030010_FORMAT_COMP_Y 0xFFFFFFF3
#define S_030010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
#define G_030010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
#define C_030010_FORMAT_COMP_Z 0xFFFFFFCF
#define S_030010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
#define G_030010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
#define C_030010_FORMAT_COMP_W 0xFFFFFF3F
#define S_030010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
#define G_030010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
#define C_030010_NUM_FORMAT_ALL 0xFFFFFCFF
#define V_030010_SQ_NUM_FORMAT_NORM 0x00000000
#define V_030010_SQ_NUM_FORMAT_INT 0x00000001
#define V_030010_SQ_NUM_FORMAT_SCALED 0x00000002
#define S_030010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
#define G_030010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
#define C_030010_SRF_MODE_ALL 0xFFFFFBFF
#define V_030010_SRF_MODE_ZERO_CLAMP_MINUS_ONE 0x00000000
#define V_030010_SRF_MODE_NO_ZERO 0x00000001
#define S_030010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
#define G_030010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
#define C_030010_FORCE_DEGAMMA 0xFFFFF7FF
#define S_030010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
#define G_030010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
#define C_030010_ENDIAN_SWAP 0xFFFFCFFF
#define S_030010_DST_SEL_X(x) (((x) & 0x7) << 16)
#define G_030010_DST_SEL_X(x) (((x) >> 16) & 0x7)
#define C_030010_DST_SEL_X 0xFFF8FFFF
#define V_030010_SQ_SEL_X 0x00000000
#define V_030010_SQ_SEL_Y 0x00000001
#define V_030010_SQ_SEL_Z 0x00000002
#define V_030010_SQ_SEL_W 0x00000003
#define V_030010_SQ_SEL_0 0x00000004
#define V_030010_SQ_SEL_1 0x00000005
#define S_030010_DST_SEL_Y(x) (((x) & 0x7) << 19)
#define G_030010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
#define C_030010_DST_SEL_Y 0xFFC7FFFF
#define S_030010_DST_SEL_Z(x) (((x) & 0x7) << 22)
#define G_030010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
#define C_030010_DST_SEL_Z 0xFE3FFFFF
#define S_030010_DST_SEL_W(x) (((x) & 0x7) << 25)
#define G_030010_DST_SEL_W(x) (((x) >> 25) & 0x7)
#define C_030010_DST_SEL_W 0xF1FFFFFF
#define S_030010_BASE_LEVEL(x) (((x) & 0xF) << 28)
#define G_030010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
#define C_030010_BASE_LEVEL 0x0FFFFFFF
#define R_030014_SQ_TEX_RESOURCE_WORD5_0 0x030014
#define S_030014_LAST_LEVEL(x) (((x) & 0xF) << 0)
#define G_030014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
#define C_030014_LAST_LEVEL 0xFFFFFFF0
#define S_030014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
#define G_030014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
#define C_030014_BASE_ARRAY 0xFFFE000F
#define S_030014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
#define G_030014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
#define C_030014_LAST_ARRAY 0xC001FFFF
#define R_030018_SQ_TEX_RESOURCE_WORD6_0 0x030018
#define S_030018_MAX_ANISO(x) (((x) & 0x7) << 0)
#define G_030018_MAX_ANISO(x) (((x) >> 0) & 0x7)
#define C_030018_MAX_ANISO 0xFFFFFFF8
#define S_030018_PERF_MODULATION(x) (((x) & 0x7) << 3)
#define G_030018_PERF_MODULATION(x) (((x) >> 3) & 0x7)
#define C_030018_PERF_MODULATION 0xFFFFFFC7
#define S_030018_INTERLACED(x) (((x) & 0x1) << 6)
#define G_030018_INTERLACED(x) (((x) >> 6) & 0x1)
#define C_030018_INTERLACED 0xFFFFFFBF
#define S_030018_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define G_030018_TILE_SPLIT(x) (((x) >> 29) & 0x7)
#define R_03001C_SQ_TEX_RESOURCE_WORD7_0 0x03001C
#define S_03001C_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 6)
#define G_03001C_MACRO_TILE_ASPECT(x) (((x) >> 6) & 0x3)
#define S_03001C_BANK_WIDTH(x) (((x) & 0x3) << 8)
#define G_03001C_BANK_WIDTH(x) (((x) >> 8) & 0x3)
#define S_03001C_BANK_HEIGHT(x) (((x) & 0x3) << 10)
#define G_03001C_BANK_HEIGHT(x) (((x) >> 10) & 0x3)
#define S_03001C_NUM_BANKS(x) (((x) & 0x3) << 16)
#define G_03001C_NUM_BANKS(x) (((x) >> 16) & 0x3)
#define S_03001C_TYPE(x) (((x) & 0x3) << 30)
#define G_03001C_TYPE(x) (((x) >> 30) & 0x3)
#define C_03001C_TYPE 0x3FFFFFFF
#define V_03001C_SQ_TEX_VTX_INVALID_TEXTURE 0x00000000
#define V_03001C_SQ_TEX_VTX_INVALID_BUFFER 0x00000001
#define V_03001C_SQ_TEX_VTX_VALID_TEXTURE 0x00000002
#define V_03001C_SQ_TEX_VTX_VALID_BUFFER 0x00000003
#define S_03001C_DATA_FORMAT(x) (((x) & 0x3F) << 0)
#define G_03001C_DATA_FORMAT(x) (((x) >> 0) & 0x3F)
#define C_03001C_DATA_FORMAT 0xFFFFFFC0
 
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
#define SQ_VTX_CONSTANT_WORD2_0 0x30008
# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
# define SQ_VTXC_STRIDE(x) ((x) << 8)
# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
# define SQ_ENDIAN_NONE 0
# define SQ_ENDIAN_8IN16 1
# define SQ_ENDIAN_8IN32 2
#define SQ_VTX_CONSTANT_WORD3_0 0x3000C
# define SQ_VTCX_SEL_X(x) ((x) << 3)
# define SQ_VTCX_SEL_Y(x) ((x) << 6)
# define SQ_VTCX_SEL_Z(x) ((x) << 9)
# define SQ_VTCX_SEL_W(x) ((x) << 12)
#define SQ_VTX_CONSTANT_WORD4_0 0x30010
#define SQ_VTX_CONSTANT_WORD5_0 0x30014
#define SQ_VTX_CONSTANT_WORD6_0 0x30018
#define SQ_VTX_CONSTANT_WORD7_0 0x3001c
 
#define TD_PS_BORDER_COLOR_INDEX 0xA400
#define TD_PS_BORDER_COLOR_RED 0xA404
#define TD_PS_BORDER_COLOR_GREEN 0xA408
#define TD_PS_BORDER_COLOR_BLUE 0xA40C
#define TD_PS_BORDER_COLOR_ALPHA 0xA410
#define TD_VS_BORDER_COLOR_INDEX 0xA414
#define TD_VS_BORDER_COLOR_RED 0xA418
#define TD_VS_BORDER_COLOR_GREEN 0xA41C
#define TD_VS_BORDER_COLOR_BLUE 0xA420
#define TD_VS_BORDER_COLOR_ALPHA 0xA424
#define TD_GS_BORDER_COLOR_INDEX 0xA428
#define TD_GS_BORDER_COLOR_RED 0xA42C
#define TD_GS_BORDER_COLOR_GREEN 0xA430
#define TD_GS_BORDER_COLOR_BLUE 0xA434
#define TD_GS_BORDER_COLOR_ALPHA 0xA438
#define TD_HS_BORDER_COLOR_INDEX 0xA43C
#define TD_HS_BORDER_COLOR_RED 0xA440
#define TD_HS_BORDER_COLOR_GREEN 0xA444
#define TD_HS_BORDER_COLOR_BLUE 0xA448
#define TD_HS_BORDER_COLOR_ALPHA 0xA44C
#define TD_LS_BORDER_COLOR_INDEX 0xA450
#define TD_LS_BORDER_COLOR_RED 0xA454
#define TD_LS_BORDER_COLOR_GREEN 0xA458
#define TD_LS_BORDER_COLOR_BLUE 0xA45C
#define TD_LS_BORDER_COLOR_ALPHA 0xA460
#define TD_CS_BORDER_COLOR_INDEX 0xA464
#define TD_CS_BORDER_COLOR_RED 0xA468
#define TD_CS_BORDER_COLOR_GREEN 0xA46C
#define TD_CS_BORDER_COLOR_BLUE 0xA470
#define TD_CS_BORDER_COLOR_ALPHA 0xA474
 
/* cayman 3D regs */
#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B4
#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS 0x8E48
#define CAYMAN_DB_EQAA 0x28804
#define CAYMAN_DB_DEPTH_INFO 0x2803C
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
/drivers/video/drm/radeon/firmware/PITCAIRN_ce.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_mc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/PITCAIRN_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_ce.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_mc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/TAHITI_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_ce.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_mc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_me.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_pfp.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/firmware/VERDE_rlc.bin
Cannot display: file marked as a binary type.
svn:mime-type = application/octet-stream
Property changes:
Added: svn:mime-type
+application/octet-stream
\ No newline at end of property
/drivers/video/drm/radeon/fwblob.asm
119,7 → 119,7
dd (SUMO2ME_END - SUMO2ME_START)
 
 
macro ni_code [arg]
macro NI_code [arg]
{
dd FIRMWARE_#arg#_ME
dd arg#ME_START
135,7 → 135,7
 
}
 
ni_code BARTS, TURKS, CAICOS, CAYMAN
NI_code BARTS, TURKS, CAICOS, CAYMAN
 
dd FIRMWARE_RV610_PFP
dd RV610PFP_START
235,7 → 235,32
dd SUMORLC_START
dd (SUMORLC_END - SUMORLC_START)
 
macro SI_code [arg]
{
dd FIRMWARE_#arg#_PFP
dd arg#_PFP_START
dd (arg#_PFP_END - arg#_PFP_START)
 
dd FIRMWARE_#arg#_ME
dd arg#_ME_START
dd (arg#_ME_END - arg#_ME_START)
 
dd FIRMWARE_#arg#_CE
dd arg#_CE_START
dd (arg#_CE_END - arg#_CE_START)
 
dd FIRMWARE_#arg#_MC
dd arg#_MC_START
dd (arg#_MC_END - arg#_MC_START)
 
dd FIRMWARE_#arg#_RLC
dd arg#_RLC_START
dd (arg#_RLC_END - arg#_RLC_START)
 
}
 
SI_code TAHITI, PITCAIRN, VERDE
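; Editor's note (illustrative, not part of the commit): FASM expands the
; SI_code macro once per argument, so the invocation above emits three
; descriptor sets. For arg = TAHITI the first entries resolve to:
;
;     dd FIRMWARE_TAHITI_PFP
;     dd TAHITI_PFP_START
;     dd (TAHITI_PFP_END - TAHITI_PFP_START)
;
; i.e. a pointer to the firmware name string, the blob start address and
; the blob length, repeated for each of pfp, me, ce, mc and rlc.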
 
___end_builtin_fw:
 
 
315,8 → 340,49
FIRMWARE_CAICOS_MC db 'radeon/CAICOS_mc.bin',0
FIRMWARE_CAYMAN_MC db 'radeon/CAYMAN_mc.bin',0
 
macro SI_firmware [arg]
{
 
forward
 
FIRMWARE_#arg#_PFP db 'radeon/',`arg,'_pfp.bin',0
FIRMWARE_#arg#_ME db 'radeon/',`arg,'_me.bin',0
FIRMWARE_#arg#_CE db 'radeon/',`arg,'_ce.bin',0
FIRMWARE_#arg#_MC db 'radeon/',`arg,'_mc.bin',0
FIRMWARE_#arg#_RLC db 'radeon/',`arg,'_rlc.bin',0
 
forward
 
align 16
arg#_PFP_START:
file "firmware/"#`arg#"_pfp.bin"
arg#_PFP_END:
 
align 16
arg#_ME_START:
file "firmware/"#`arg#"_me.bin"
arg#_ME_END:
 
align 16
arg#_CE_START:
file "firmware/"#`arg#"_ce.bin"
arg#_CE_END:
 
align 16
arg#_MC_START:
file "firmware/"#`arg#"_mc.bin"
arg#_MC_END:
 
align 16
arg#_RLC_START:
file "firmware/"#`arg#"_rlc.bin"
arg#_RLC_END:
 
}
 
SI_firmware TAHITI,PITCAIRN,VERDE
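; Editor's note (illustrative, not part of the commit): the FASM ` operator
; stringifies a macro argument and # concatenates tokens/strings, so for
; arg = TAHITI the two forward blocks above expand to lines such as:
;
;     FIRMWARE_TAHITI_PFP db 'radeon/TAHITI_pfp.bin',0
;
;     align 16
;     TAHITI_PFP_START:
;     file "firmware/TAHITI_pfp.bin"
;     TAHITI_PFP_END:
;
; pairing the lookup name used at runtime with the blob embedded from the
; firmware/ directory at build time.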
 
align 16
R100CP_START:
file 'firmware/R100_cp.bin'
R100CP_END:
627,3 → 693,5
CAYMANMC_START:
file 'firmware/CAYMAN_mc.bin'
CAYMANMC_END:
 
 
/drivers/video/drm/radeon/ni.c
24,10 → 24,11
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include <drm/radeon_drm.h>
#include "nid.h"
#include "atom.h"
#include "ni_reg.h"
39,6 → 40,10
extern void evergreen_mc_program(struct radeon_device *rdev);
extern void evergreen_irq_suspend(struct radeon_device *rdev);
extern int evergreen_mc_init(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
extern void si_rlc_fini(struct radeon_device *rdev);
extern int si_rlc_init(struct radeon_device *rdev);
 
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
50,6 → 55,8
#define CAYMAN_RLC_UCODE_SIZE 1024
#define CAYMAN_MC_UCODE_SIZE 6037
 
#define ARUBA_RLC_UCODE_SIZE 1536
 
/* Firmware Names */
MODULE_FIRMWARE("radeon/BARTS_pfp.bin");
MODULE_FIRMWARE("radeon/BARTS_me.bin");
65,6 → 72,9
MODULE_FIRMWARE("radeon/CAYMAN_me.bin");
MODULE_FIRMWARE("radeon/CAYMAN_mc.bin");
MODULE_FIRMWARE("radeon/CAYMAN_rlc.bin");
MODULE_FIRMWARE("radeon/ARUBA_pfp.bin");
MODULE_FIRMWARE("radeon/ARUBA_me.bin");
MODULE_FIRMWARE("radeon/ARUBA_rlc.bin");
 
#define BTC_IO_MC_REGS_SIZE 29
 
259,8 → 269,11
WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
 
/* wait for training to complete */
while (!(RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD))
udelay(10);
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(MC_IO_PAD_CNTL_D0) & MEM_FALL_OUT_CMD)
break;
udelay(1);
}
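/* Editor's note: the rework above bounds the memory-training wait that was
 * previously an open-ended busy loop. rdev->usec_timeout is the driver's
 * usual poll budget (typically 100000, i.e. ~100 ms of udelay(1) steps),
 * so a stuck MEM_FALL_OUT_CMD can no longer hang the MC ucode load path.
 */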
 
if (running)
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
320,6 → 333,15
rlc_req_size = CAYMAN_RLC_UCODE_SIZE * 4;
mc_req_size = CAYMAN_MC_UCODE_SIZE * 4;
break;
case CHIP_ARUBA:
chip_name = "ARUBA";
rlc_chip_name = "ARUBA";
/* pfp/me same size as CAYMAN */
pfp_req_size = CAYMAN_PFP_UCODE_SIZE * 4;
me_req_size = CAYMAN_PM4_UCODE_SIZE * 4;
rlc_req_size = ARUBA_RLC_UCODE_SIZE * 4;
mc_req_size = 0;
break;
default: BUG();
}
 
359,6 → 381,8
err = -EINVAL;
}
 
/* no MC ucode on TN */
if (!(rdev->flags & RADEON_IS_IGP)) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
if (err)
369,6 → 393,7
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
}
out:
platform_device_unregister(pdev);
 
392,249 → 417,21
/*
* Core functions
*/
static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends_per_asic,
u32 *backend_disable_mask_per_asic,
u32 num_shader_engines)
{
u32 backend_map = 0;
u32 enabled_backends_mask = 0;
u32 enabled_backends_count = 0;
u32 num_backends_per_se;
u32 cur_pipe;
u32 swizzle_pipe[CAYMAN_MAX_PIPES];
u32 cur_backend = 0;
u32 i;
bool force_no_swizzle;
 
/* force legal values */
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
num_tile_pipes = rdev->config.cayman.max_tile_pipes;
if (num_shader_engines < 1)
num_shader_engines = 1;
if (num_shader_engines > rdev->config.cayman.max_shader_engines)
num_shader_engines = rdev->config.cayman.max_shader_engines;
if (num_backends_per_asic < num_shader_engines)
num_backends_per_asic = num_shader_engines;
if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
 
/* make sure we have the same number of backends per se */
num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
/* set up the number of backends per se */
num_backends_per_se = num_backends_per_asic / num_shader_engines;
if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
num_backends_per_se = rdev->config.cayman.max_backends_per_se;
num_backends_per_asic = num_backends_per_se * num_shader_engines;
}
 
/* create enable mask and count for enabled backends */
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends_per_asic)
break;
}
 
/* force the backends mask to match the current number of backends */
if (enabled_backends_count != num_backends_per_asic) {
u32 this_backend_enabled;
u32 shader_engine;
u32 backend_per_se;
 
enabled_backends_mask = 0;
enabled_backends_count = 0;
*backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
/* calc the current se */
shader_engine = i / rdev->config.cayman.max_backends_per_se;
/* calc the backend per se */
backend_per_se = i % rdev->config.cayman.max_backends_per_se;
/* default to not enabled */
this_backend_enabled = 0;
if ((shader_engine < num_shader_engines) &&
(backend_per_se < num_backends_per_se))
this_backend_enabled = 1;
if (this_backend_enabled) {
enabled_backends_mask |= (1 << i);
*backend_disable_mask_per_asic &= ~(1 << i);
++enabled_backends_count;
}
}
}
 
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
switch (rdev->family) {
case CHIP_CAYMAN:
force_no_swizzle = true;
break;
default:
force_no_swizzle = false;
break;
}
if (force_no_swizzle) {
bool last_backend_enabled = false;
 
force_no_swizzle = false;
for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
if (((enabled_backends_mask >> i) & 1) == 1) {
if (last_backend_enabled)
force_no_swizzle = true;
last_backend_enabled = true;
} else
last_backend_enabled = false;
}
}
 
switch (num_tile_pipes) {
case 1:
case 3:
case 5:
case 7:
DRM_ERROR("odd number of pipes!\n");
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
swizzle_pipe[3] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
}
break;
}
 
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
 
backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
 
cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
}
 
return backend_map;
}
 
static void cayman_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
 
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
case 2:
case 3:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
}
 
switch (rdev->family) {
case CHIP_CAYMAN:
default:
//tcp_chan_steer_lo = 0x54763210
tcp_chan_steer_lo = 0x76543210;
tcp_chan_steer_hi = 0x0000ba98;
break;
}
 
WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
 
static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
u32 disable_mask_per_se,
u32 max_disable_mask_per_se,
u32 num_shader_engines)
{
u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
 
if (num_shader_engines == 1)
return disable_mask_per_asic;
else if (num_shader_engines == 2)
return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
else
return 0xffffffff;
}
 
static void cayman_gpu_init(struct radeon_device *rdev)
{
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config;
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 gb_backend_map;
u32 cgts_tcc_disable;
u32 sx_debug_1;
u32 smx_dc_ctl0;
u32 gc_user_shader_pipe_config;
u32 gc_user_rb_backend_disable;
u32 cgts_user_tcc_disable;
u32 cgts_sm_ctrl_reg;
u32 hdp_host_path_cntl;
u32 tmp;
u32 disabled_rb_mask;
int i, j;
 
switch (rdev->family) {
case CHIP_CAYMAN:
default:
rdev->config.cayman.max_shader_engines = 2;
rdev->config.cayman.max_pipes_per_simd = 4;
rdev->config.cayman.max_tile_pipes = 8;
655,9 → 452,61
rdev->config.cayman.sc_prim_fifo_size = 0x100;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_ARUBA:
default:
rdev->config.cayman.max_shader_engines = 1;
rdev->config.cayman.max_pipes_per_simd = 4;
rdev->config.cayman.max_tile_pipes = 2;
if ((rdev->pdev->device == 0x9900) ||
(rdev->pdev->device == 0x9901) ||
(rdev->pdev->device == 0x9905) ||
(rdev->pdev->device == 0x9906) ||
(rdev->pdev->device == 0x9907) ||
(rdev->pdev->device == 0x9908) ||
(rdev->pdev->device == 0x9909) ||
(rdev->pdev->device == 0x9910) ||
(rdev->pdev->device == 0x9917)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
(rdev->pdev->device == 0x9913) ||
(rdev->pdev->device == 0x9918)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
} else if ((rdev->pdev->device == 0x9919) ||
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
(rdev->pdev->device == 0x9994) ||
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
} else {
rdev->config.cayman.max_simds_per_se = 2;
rdev->config.cayman.max_backends_per_se = 1;
}
rdev->config.cayman.max_texture_channel_caches = 2;
rdev->config.cayman.max_gprs = 256;
rdev->config.cayman.max_threads = 256;
rdev->config.cayman.max_gs_threads = 32;
rdev->config.cayman.max_stack_entries = 512;
rdev->config.cayman.sx_num_of_sets = 8;
rdev->config.cayman.sx_max_export_size = 256;
rdev->config.cayman.sx_max_export_pos_size = 64;
rdev->config.cayman.sx_max_export_smx_size = 192;
rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sq_num_cf_insts = 2;
 
rdev->config.cayman.sc_prim_fifo_size = 0x40;
rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
break;
}
 
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
WREG32((0x2c14 + j), 0x00000000);
669,40 → 518,11
 
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
evergreen_fix_pci_max_read_req_size(rdev);
 
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
cgts_tcc_disable = 0xff000000;
gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
 
rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
rdev->config.cayman.backend_disable_mask_per_asic =
cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
rdev->config.cayman.num_shader_engines);
rdev->config.cayman.backend_map =
cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
rdev->config.cayman.num_backends_per_se *
rdev->config.cayman.num_shader_engines,
&rdev->config.cayman.backend_disable_mask_per_asic,
rdev->config.cayman.num_shader_engines);
tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
rdev->config.cayman.mem_max_burst_length_bytes = 512;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (rdev->config.cayman.mem_row_size_in_kb > 4)
712,73 → 532,6
rdev->config.cayman.num_gpus = 1;
rdev->config.cayman.multi_gpu_tile_size = 64;
 
//gb_addr_config = 0x02011003
#if 0
gb_addr_config = RREG32(GB_ADDR_CONFIG);
#else
gb_addr_config = 0;
switch (rdev->config.cayman.num_tile_pipes) {
case 1:
default:
gb_addr_config |= NUM_PIPES(0);
break;
case 2:
gb_addr_config |= NUM_PIPES(1);
break;
case 4:
gb_addr_config |= NUM_PIPES(2);
break;
case 8:
gb_addr_config |= NUM_PIPES(3);
break;
}
 
tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
switch (rdev->config.cayman.num_gpus) {
case 1:
default:
gb_addr_config |= NUM_GPUS(0);
break;
case 2:
gb_addr_config |= NUM_GPUS(1);
break;
case 4:
gb_addr_config |= NUM_GPUS(2);
break;
}
switch (rdev->config.cayman.multi_gpu_tile_size) {
case 16:
gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
break;
case 32:
default:
gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
break;
case 64:
gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
break;
case 128:
gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
break;
}
switch (rdev->config.cayman.mem_row_size_in_kb) {
case 1:
default:
gb_addr_config |= ROW_SIZE(0);
break;
case 2:
gb_addr_config |= ROW_SIZE(1);
break;
case 4:
gb_addr_config |= ROW_SIZE(2);
break;
}
#endif
 
tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
rdev->config.cayman.num_tile_pipes = (1 << tmp);
tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
792,17 → 545,7
tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
 
//gb_backend_map = 0x76541032;
#if 0
gb_backend_map = RREG32(GB_BACKEND_MAP);
#else
gb_backend_map =
cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
rdev->config.cayman.num_backends_per_se *
rdev->config.cayman.num_shader_engines,
&rdev->config.cayman.backend_disable_mask_per_asic,
rdev->config.cayman.num_shader_engines);
#endif
 
/* setup tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
826,34 → 569,61
rdev->config.cayman.tile_config |= (3 << 0);
break;
}
 
/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
if (rdev->flags & RADEON_IS_IGP)
rdev->config.cayman.tile_config |= 1 << 4;
else {
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.cayman.tile_config |= 0 << 4;
break;
case 1: /* eight banks */
rdev->config.cayman.tile_config |= 1 << 4;
break;
case 2: /* sixteen banks */
default:
rdev->config.cayman.tile_config |= 2 << 4;
break;
}
}
rdev->config.cayman.tile_config |=
((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
rdev->config.cayman.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.cayman.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
 
rdev->config.cayman.backend_map = gb_backend_map;
WREG32(GB_BACKEND_MAP, gb_backend_map);
tmp = 0;
for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
u32 rb_disable_bitmap;
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
tmp <<= 4;
tmp |= rb_disable_bitmap;
}
/* enabled rbs are just the ones not disabled :) */
disabled_rb_mask = tmp;
 
WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
 
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
cayman_program_channel_remap(rdev);
tmp = gb_addr_config & NUM_PIPES_MASK;
tmp = r6xx_remap_render_backend(rdev, tmp,
rdev->config.cayman.max_backends_per_se *
rdev->config.cayman.max_shader_engines,
CAYMAN_MAX_BACKENDS, disabled_rb_mask);
WREG32(GB_BACKEND_MAP, tmp);
 
/* primary versions */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
cgts_tcc_disable = 0xffff0000;
for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
cgts_tcc_disable &= ~(1 << (16 + i));
WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
 
/* user versions */
WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
 
956,11 → 726,11
WREG32(VM_INVALIDATE_REQUEST, 1);
}
 
int cayman_pcie_gart_enable(struct radeon_device *rdev)
static int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
int r;
int i, r;
 
if (rdev->gart.table.vram.robj == NULL) {
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
969,9 → 739,12
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
991,19 → 764,41
WREG32(VM_CONTEXT0_CNTL2, 0);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
/* disable context1-7 */
 
WREG32(0x15D4, 0);
WREG32(0x15D8, 0);
WREG32(0x15DC, 0);
 
/* empty context1-7 */
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application, and set up and assigned
* on the fly in the vm part of radeon_gart.c
*/
for (i = 1; i < 8; i++) {
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->gart.table_addr >> 12);
}
 
/* enable context1-7 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
void cayman_pcie_gart_disable(struct radeon_device *rdev)
static void cayman_pcie_gart_disable(struct radeon_device *rdev)
{
int r;
 
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
1019,20 → 814,82
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
L2_CACHE_BIGK_FRAGMENT_SIZE(6));
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
}
}
radeon_gart_table_vram_unpin(rdev);
}

void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
int ring, u32 cp_int_cntl)
{
u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;

WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
WREG32(CP_INT_CNTL, cp_int_cntl);
}
 
 
/*
* CP.
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
}
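/* Editor's note: tying this to the EVENT_WRITE_EOP field comments added to
 * nid.h in this same revision: DATA_SEL(1) has the CP write the low 32 bits
 * of the fence seq to addr, and INT_SEL(2) raises the interrupt only after
 * that write is confirmed, so the IRQ handler never reads a stale seq.
 */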
 
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
 
/* set to DX10/11 mode */
radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
radeon_ring_write(ring, 1);
 
if (ring->rptr_save_reg) {
uint32_t next_rptr = ring->wptr + 3 + 4 + 8;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
}
 
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw |
(ib->vm ? (ib->vm->id << 24) : 0));
 
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
}
 
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
1072,25 → 929,26
 
static int cayman_cp_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
 
r = radeon_ring_lock(rdev, 7);
r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
 
cayman_cp_enable(rdev, true);
 
r = radeon_ring_lock(rdev, cayman_default_size + 19);
r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
1097,38 → 955,38
}
 
/* setup clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
for (i = 0; i < cayman_default_size; i++)
radeon_ring_write(rdev, cayman_default_state[i]);
radeon_ring_write(ring, cayman_default_state[i]);
 
radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
/* set clear context state */
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
 
/* SQ_VTX_BASE_VTX_LOC */
radeon_ring_write(rdev, 0xc0026f00);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(rdev, 0x00000000);
radeon_ring_write(ring, 0xc0026f00);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
radeon_ring_write(ring, 0x00000000);
 
/* Clear consts */
radeon_ring_write(rdev, 0xc0036f00);
radeon_ring_write(rdev, 0x00000bc4);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(rdev, 0xffffffff);
radeon_ring_write(ring, 0xc0036f00);
radeon_ring_write(ring, 0x00000bc4);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
radeon_ring_write(ring, 0xffffffff);
 
radeon_ring_write(rdev, 0xc0026900);
radeon_ring_write(rdev, 0x00000316);
radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(rdev, 0x00000010); /* */
radeon_ring_write(ring, 0xc0026900);
radeon_ring_write(ring, 0x00000316);
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* */
 
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, ring);
 
/* XXX init other rings */
 
1136,12 → 994,35
}
 
 
 
int cayman_cp_resume(struct radeon_device *rdev)
static int cayman_cp_resume(struct radeon_device *rdev)
{
u32 tmp;
u32 rb_bufsz;
int r;
static const int ridx[] = {
RADEON_RING_TYPE_GFX_INDEX,
CAYMAN_RING_TYPE_CP1_INDEX,
CAYMAN_RING_TYPE_CP2_INDEX
};
static const unsigned cp_rb_cntl[] = {
CP_RB0_CNTL,
CP_RB1_CNTL,
CP_RB2_CNTL,
};
static const unsigned cp_rb_rptr_addr[] = {
CP_RB0_RPTR_ADDR,
CP_RB1_RPTR_ADDR,
CP_RB2_RPTR_ADDR
};
static const unsigned cp_rb_rptr_addr_hi[] = {
CP_RB0_RPTR_ADDR_HI,
CP_RB1_RPTR_ADDR_HI,
CP_RB2_RPTR_ADDR_HI
};
static const unsigned cp_rb_base[] = {
CP_RB0_BASE,
CP_RB1_BASE,
CP_RB2_BASE
};
struct radeon_ring *ring;
int i, r;
 
/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
1155,7 → 1036,8
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
 
WREG32(CP_SEM_WAIT_TIMER, 0x4);
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
 
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
1162,100 → 1044,59
 
WREG32(CP_DEBUG, (1 << 27));
 
/* ring 0 - compute and gfx */
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB0_CNTL, tmp);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB0_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
}
 
mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
for (i = 0; i < 3; ++i) {
uint32_t rb_cntl;
uint64_t addr;
 
WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
 
rdev->cp.rptr = RREG32(CP_RB0_RPTR);
rdev->cp.wptr = RREG32(CP_RB0_WPTR);
 
/* ring1 - compute only */
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
ring = &rdev->ring[ridx[i]];
rb_cntl = drm_order(ring->ring_size / 8);
rb_cntl |= drm_order(RADEON_GPU_PAGE_SIZE/8) << 8;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
rb_cntl |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB1_CNTL, tmp);
WREG32(cp_rb_cntl[i], rb_cntl);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB1_WPTR, 0);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
addr = rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET;
WREG32(cp_rb_rptr_addr[i], addr & 0xFFFFFFFC);
WREG32(cp_rb_rptr_addr_hi[i], upper_32_bits(addr) & 0xFF);
}
 
mdelay(1);
WREG32(CP_RB1_CNTL, tmp);
/* set the rb base addr, this causes an internal reset of ALL rings */
for (i = 0; i < 3; ++i) {
ring = &rdev->ring[ridx[i]];
WREG32(cp_rb_base[i], ring->gpu_addr >> 8);
}
 
WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
 
rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
rdev->cp1.wptr = RREG32(CP_RB1_WPTR);
 
/* ring2 - compute only */
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB2_CNTL, tmp);
 
for (i = 0; i < 3; ++i) {
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB2_WPTR, 0);
ring = &rdev->ring[ridx[i]];
WREG32_P(cp_rb_cntl[i], RB_RPTR_WR_ENA, ~RB_RPTR_WR_ENA);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
ring->rptr = ring->wptr = 0;
WREG32(ring->rptr_reg, ring->rptr);
WREG32(ring->wptr_reg, ring->wptr);
 
mdelay(1);
WREG32(CP_RB2_CNTL, tmp);
WREG32_P(cp_rb_cntl[i], 0, ~RB_RPTR_WR_ENA);
}
 
WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
 
rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
rdev->cp2.wptr = RREG32(CP_RB2_WPTR);
 
/* start the rings */
cayman_cp_start(rdev);
rdev->cp.ready = true;
rdev->cp1.ready = true;
rdev->cp2.ready = true;
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
/* this only tests cp0 */
r = radeon_ring_test(rdev);
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
rdev->cp.ready = false;
rdev->cp1.ready = false;
rdev->cp2.ready = false;
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
return r;
}
 
1262,35 → 1103,6
return 0;
}
 
bool cayman_gpu_is_lockup(struct radeon_device *rdev)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status_se0, grbm_status_se1;
struct r100_gpu_lockup *lockup = &rdev->config.cayman.lockup;
int r;
 
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
/* XXX deal with CP0,1,2 */
rdev->cp.rptr = RREG32(CP_RB0_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
 
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
1308,6 → 1120,23
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14F8));
dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(0x14D8));
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
RREG32(0x14FC));
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
RREG32(0x14DC));
 
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
1338,6 → 1167,7
(void)RREG32(GRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
 
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
1346,6 → 1176,14
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
evergreen_mc_resume(rdev, &save);
return 0;
}
1357,8 → 1195,21
 
static int cayman_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* enable pcie gen2 link */
evergreen_pcie_gen2_enable(rdev);
 
if (rdev->flags & RADEON_IS_IGP) {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
r = ni_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
} else {
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) {
r = ni_init_microcode(rdev);
if (r) {
1366,12 → 1217,18
return r;
}
}
 
r = ni_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
evergreen_mc_program(rdev);
r = cayman_pcie_gart_enable(rdev);
if (r)
1380,11 → 1237,20
 
r = evergreen_blit_init(rdev);
if (r) {
// evergreen_blit_fini(rdev);
rdev->asic->copy = NULL;
// r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate rlc buffers */
if (rdev->flags & RADEON_IS_IGP) {
r = si_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
}
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
1399,7 → 1265,9
}
evergreen_irq_set(rdev);
 
r = radeon_ring_init(rdev, rdev->cp.ring_size);
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = cayman_cp_load_microcode(rdev);
1424,12 → 1292,9
*/
int cayman_init(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* This don't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
1476,8 → 1341,8
if (r)
return r;
 
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
1492,24 → 1357,15
dev_err(rdev->dev, "disabling GPU acceleration\n");
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
rdev->accel_working = false;
}
r = r600_ib_test(rdev);
if (r) {
DRM_ERROR("radeon: failed testing IB (%d).\n", r);
rdev->accel_working = false;
}
}
 
/* Don't start up if the MC ucode is missing.
* The default clocks and voltages before the MC ucode
* is loaded are not sufficient for advanced operations.
*
* We can skip this check for TN, because there is no MC
* ucode.
*/
if (!rdev->mc_fw) {
if (!rdev->mc_fw && !(rdev->flags & RADEON_IS_IGP)) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
return -EINVAL;
}
1517,3 → 1373,119
return 0;
}
 
/*
* vm
*/
int cayman_vm_init(struct radeon_device *rdev)
{
/* number of VMs */
rdev->vm_manager.nvm = 8;
/* base offset of vram pages */
if (rdev->flags & RADEON_IS_IGP) {
u64 tmp = RREG32(FUS_MC_VM_FB_OFFSET);
tmp <<= 22;
rdev->vm_manager.vram_base_offset = tmp;
} else
rdev->vm_manager.vram_base_offset = 0;
return 0;
}
 
void cayman_vm_fini(struct radeon_device *rdev)
{
}
 
#define R600_ENTRY_VALID (1 << 0)
#define R600_PTE_SYSTEM (1 << 1)
#define R600_PTE_SNOOPED (1 << 2)
#define R600_PTE_READABLE (1 << 5)
#define R600_PTE_WRITEABLE (1 << 6)
 
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags)
{
uint32_t r600_flags = 0;
r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_ENTRY_VALID : 0;
r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
if (flags & RADEON_VM_PAGE_SYSTEM) {
r600_flags |= R600_PTE_SYSTEM;
r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
}
return r600_flags;
}
 
/**
* cayman_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using the CP (cayman-si).
*/
void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 
while (count) {
unsigned ndw = 1 + count * 2;
if (ndw > 0x3FFF)
ndw = 0x3FFF;
 
radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
radeon_ring_write(ring, pe);
radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
for (; ndw > 1; ndw -= 2, --count, pe += 8) {
uint64_t value = 0;
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
addr += incr;
 
} else if (flags & RADEON_VM_PAGE_VALID) {
value = addr;
addr += incr;
}
 
value |= r600_flags;
radeon_ring_write(ring, value);
radeon_ring_write(ring, upper_32_bits(value));
}
}
}
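/* Editor's note: ndw = 1 + count * 2 because each PTE is emitted as two
 * dwords (a 64-bit value). The clamp to 0x3FFF matches the 14-bit count
 * field of a type-3 PM4 header, so larger updates are transparently split
 * across successive PACKET3_ME_WRITE packets by the outer while loop.
 */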
 
/**
* cayman_vm_flush - vm flush using the CP
*
* @rdev: radeon_device pointer
*
* Update the page table base and flush the VM TLB
* using the CP (cayman-si).
*/
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
if (vm == NULL)
return;
 
radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* flush hdp cache */
radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
radeon_ring_write(ring, 0x1);
 
/* bits 0-7 are the VM contexts0-7 */
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm->id);
 
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
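/* Editor's note: the PFP runs ahead of the ME, so without the final
 * PACKET3_PFP_SYNC_ME the prefetcher could fetch through the old page
 * tables before the base-address write and VM_INVALIDATE_REQUEST above
 * have been processed; the sync stalls it until the ME catches up.
 */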
/drivers/video/drm/radeon/nid.h
41,7 → 41,13
#define CAYMAN_MAX_TCC 16
#define CAYMAN_MAX_TCC_MASK 0xFF
 
#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
 
#define DMIF_ADDR_CONFIG 0xBD4
#define SRBM_GFX_CNTL 0x0E44
#define RINGID(x) (((x) & 0x3) << 0)
#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
 
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
103,6 → 109,7
#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
#define FUS_MC_VM_FB_OFFSET 0x2068
 
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
#define MC_ARB_RAMCFG 0x2760
144,6 → 151,8
#define CGTS_SYS_TCC_DISABLE 0x3F90
#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
 
#define RLC_GFX_INDEX 0x3FC4
 
#define CONFIG_MEMSIZE 0x5428
 
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
208,6 → 217,12
#define SOFT_RESET_VGT (1 << 14)
#define SOFT_RESET_IA (1 << 15)
 
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SE_INDEX(x) ((x) << 16)
#define INSTANCE_BROADCAST_WRITES (1 << 30)
#define SE_BROADCAST_WRITES (1 << 31)
 
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
219,6 → 234,12
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
#define CP_SEM_WAIT_TIMER 0x85BC
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_COHER_CNTL2 0x85E8
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
#define CP_BUSY_STAT 0x867C
#define CP_STAT 0x8680
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
394,6 → 415,12
#define CP_RB0_RPTR_ADDR 0xC10C
#define CP_RB0_RPTR_ADDR_HI 0xC110
#define CP_RB0_WPTR 0xC114
 
#define CP_INT_CNTL 0xC124
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
# define TIME_STAMP_INT_ENABLE (1 << 26)
 
#define CP_RB1_BASE 0xC180
#define CP_RB1_CNTL 0xC184
#define CP_RB1_RPTR_ADDR 0xC188
411,6 → 438,10
#define CP_ME_RAM_DATA 0xC160
#define CP_DEBUG 0xC1FC
 
#define VGT_EVENT_INITIATOR 0x28a90
# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
 
/*
* PM4
*/
445,6 → 476,7
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
#define PACKET3_MODE_CONTROL 0x18
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
470,6 → 502,7
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
494,7 → 527,27
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
#define EVENT_TYPE(x) ((x) << 0)
#define EVENT_INDEX(x) ((x) << 8)
/* 0 - any non-TS event
* 1 - ZPASS_DONE
* 2 - SAMPLE_PIPELINESTAT
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
* 5 - TS events
*/
#define PACKET3_EVENT_WRITE_EOP 0x47
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
* 2 - send 64bit data
* 3 - send 64bit counter value
*/
#define INT_SEL(x) ((x) << 24)
/* 0 - none
* 1 - interrupt only (DATA_SEL = 0)
* 2 - interrupt when data write is confirmed
*/
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
533,6 → 586,7
#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
#define PACKET3_SET_RESOURCE_INDIRECT 0x74
#define PACKET3_SET_APPEND_CNT 0x75
#define PACKET3_ME_WRITE 0x7A
 
#endif
 
/drivers/video/drm/radeon/pci.c
1,5 → 1,6
 
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/mod_devicetable.h>
#include <errno-base.h>
6,11 → 7,10
#include <pci.h>
#include <syscall.h>
 
extern int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn);
 
static LIST_HEAD(devices);
 
static pci_dev_t* pci_scan_device(u32_t bus, int devfn);
 
 
/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
 
345,14 → 345,17
}
};
 
if( pci_scan_filter(id, busnr, devfn) == 0)
return NULL;
 
hdr = PciRead8(busnr, devfn, PCI_HEADER_TYPE);
 
dev = (pci_dev_t*)kzalloc(sizeof(pci_dev_t), 0);
if(unlikely(dev == NULL))
return NULL;
 
INIT_LIST_HEAD(&dev->link);
 
if(unlikely(dev == NULL))
return NULL;
 
dev->pci_dev.busnr = busnr;
dev->pci_dev.devfn = devfn;
367,6 → 370,9
 
};
 
 
 
 
int pci_scan_slot(u32_t bus, int devfn)
{
int func, nr = 0;
405,49 → 411,6
return nr;
};
 
 
void pci_scan_bus(u32_t bus)
{
u32_t devfn;
pci_dev_t *dev;
 
 
for (devfn = 0; devfn < 0x100; devfn += 8)
pci_scan_slot(bus, devfn);
 
}
 
int enum_pci_devices()
{
pci_dev_t *dev;
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
 
// list_initialize(&devices);
 
last_bus = PciApi(1);
 
 
if( unlikely(last_bus == -1))
return -1;
 
for(;bus <= last_bus; bus++)
pci_scan_bus(bus);
 
// for(dev = (dev_t*)devices.next;
// &dev->link != &devices;
// dev = (dev_t*)dev->link.next)
// {
// dbgprintf("PCI device %x:%x bus:%x devfn:%x\n",
// dev->pci_dev.vendor,
// dev->pci_dev.device,
// dev->pci_dev.bus,
// dev->pci_dev.devfn);
//
// }
return 0;
}
 
#define PCI_FIND_CAP_TTL 48
 
static int __pci_find_next_cap_ttl(unsigned int bus, unsigned int devfn,
513,317 → 476,364
}
 
 
#if 0
/**
* pci_set_power_state - Set the power state of a PCI device
* @dev: PCI device to be suspended
* @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
*
* Transition a device to a new power state, using the Power Management
* Capabilities in the device's config space.
*
* RETURN VALUE:
* -EINVAL if trying to enter a lower state than we're already in.
* 0 if we're already in the requested state.
* -EIO if device does not support PCI PM.
* 0 if we can successfully change the power state.
*/
int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 
 
int enum_pci_devices()
{
int pm, need_restore = 0;
u16 pmcsr, pmc;
pci_dev_t *dev;
u32_t last_bus;
u32_t bus = 0 , devfn = 0;
 
/* bound the state we're entering */
if (state > PCI_D3hot)
state = PCI_D3hot;
 
/*
* If the device or the parent bridge can't support PCI PM, ignore
* the request if we're doing anything besides putting it into D0
* (which would only happen on boot).
*/
if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
return 0;
last_bus = PciApi(1);
 
/* find PCI PM capability in list */
pm = pci_find_capability(dev, PCI_CAP_ID_PM);
 
/* abort if the device doesn't support PM capabilities */
if (!pm)
return -EIO;
if( unlikely(last_bus == -1))
return -1;
 
/* Validate current state:
* Can enter D0 from any state, but if we can only go deeper
* to sleep if we're already in a low power state
*/
if (state != PCI_D0 && dev->current_state > state) {
printk(KERN_ERR "%s(): %s: state=%d, current state=%d\n",
__FUNCTION__, pci_name(dev), state, dev->current_state);
return -EINVAL;
} else if (dev->current_state == state)
return 0; /* we're already there */
for(;bus <= last_bus; bus++)
{
for (devfn = 0; devfn < 0x100; devfn += 8)
pci_scan_slot(bus, devfn);
 
 
pci_read_config_word(dev,pm + PCI_PM_PMC,&pmc);
if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
printk(KERN_DEBUG
"PCI: %s has unsupported PM cap regs version (%u)\n",
pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
return -EIO;
}
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
dbgprintf("PCI device %x:%x bus:%x devfn:%x\n",
dev->pci_dev.vendor,
dev->pci_dev.device,
dev->pci_dev.busnr,
dev->pci_dev.devfn);
 
/* check if this device supports the desired state */
if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
return -EIO;
else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
return -EIO;
 
pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
 
/* If we're (effectively) in D3, force entire word to 0.
* This doesn't affect PME_Status, disables PME_En, and
* sets PowerState to 0.
*/
switch (dev->current_state) {
case PCI_D0:
case PCI_D1:
case PCI_D2:
pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
pmcsr |= state;
break;
case PCI_UNKNOWN: /* Boot-up */
if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
&& !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
need_restore = 1;
/* Fall-through: force to D0 */
default:
pmcsr = 0;
break;
}
return 0;
}
 
/* enter specified state */
pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);
const struct pci_device_id* find_pci_device(pci_dev_t* pdev, const struct pci_device_id *idlist)
{
pci_dev_t *dev;
const struct pci_device_id *ent;
 
/* Mandatory power management transition delays */
/* see PCI PM 1.1 5.6.1 table 18 */
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
msleep(pci_pm_d3_delay);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
udelay(200);
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( dev->pci_dev.vendor != idlist->vendor )
continue;
 
/*
* Give firmware a chance to be called, such as ACPI _PRx, _PSx
* Firmware method after native method ?
*/
if (platform_pci_set_power_state)
platform_pci_set_power_state(dev, state);
for(ent = idlist; ent->vendor != 0; ent++)
{
if(unlikely(ent->device == dev->pci_dev.device))
{
pdev->pci_dev = dev->pci_dev;
return ent;
}
};
}
 
dev->current_state = state;
return NULL;
};
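/* Editor's sketch (hypothetical caller, not part of the commit): the id
 * list is walked until the terminating entry with vendor == 0, matching
 * the ent->vendor != 0 loop above, e.g.:
 *
 *     static const struct pci_device_id ids[] = {
 *         { .vendor = 0x1002, .device = 0x6718 },  // example VID/DID
 *         { 0, }
 *     };
 *     pci_dev_t pdev;
 *     const struct pci_device_id *ent = find_pci_device(&pdev, ids);
 *     if (ent != NULL)
 *         dbgprintf("found device %x:%x\n", ent->vendor, ent->device);
 */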
 
/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
* INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
* from D3hot to D0 _may_ perform an internal reset, thereby
* going to "D0 Uninitialized" rather than "D0 Initialized".
* For example, at least some versions of the 3c905B and the
* 3c556B exhibit this behaviour.
*
* At least some laptop BIOSen (e.g. the Thinkpad T21) leave
* devices in a D3hot state at boot. Consequently, we need to
* restore at least the BARs so that the device will be
* accessible to its driver.
*/
if (need_restore)
pci_restore_bars(dev);
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from)
{
pci_dev_t *dev;
 
return 0;
}
#endif
dev = (pci_dev_t*)devices.next;
 
int pcibios_enable_resources(struct pci_dev *dev, int mask)
if(from != NULL)
{
u16_t cmd, old_cmd;
int idx;
struct resource *r;
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( &dev->pci_dev == from)
{
dev = (pci_dev_t*)dev->link.next;
break;
};
}
};
 
cmd = PciRead16(dev->busnr, dev->devfn, PCI_COMMAND);
old_cmd = cmd;
for (idx = 0; idx < PCI_NUM_RESOURCES; idx++)
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
/* Only set up the requested stuff */
if (!(mask & (1 << idx)))
if( dev->pci_dev.vendor != vendor )
continue;
 
r = &dev->resource[idx];
if (!(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
continue;
if ((idx == PCI_ROM_RESOURCE) &&
(!(r->flags & IORESOURCE_ROM_ENABLE)))
continue;
if (!r->start && r->end) {
printk(KERN_ERR "PCI: Device %s not available "
"because of resource %d collisions\n",
pci_name(dev), idx);
return -EINVAL;
if(dev->pci_dev.device == device)
{
return &dev->pci_dev;
}
if (r->flags & IORESOURCE_IO)
cmd |= PCI_COMMAND_IO;
if (r->flags & IORESOURCE_MEM)
cmd |= PCI_COMMAND_MEMORY;
}
if (cmd != old_cmd) {
printk("PCI: Enabling device %s (%04x -> %04x)\n",
pci_name(dev), old_cmd, cmd);
PciWrite16(dev->busnr, dev->devfn, PCI_COMMAND, cmd);
return NULL;
};
 
 
struct pci_dev * pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
{
pci_dev_t *dev;
 
for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if ( dev->pci_dev.busnr == bus && dev->pci_dev.devfn == devfn)
return &dev->pci_dev;
}
return 0;
return NULL;
}
 
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from)
{
pci_dev_t *dev;
 
int pcibios_enable_device(struct pci_dev *dev, int mask)
dev = (pci_dev_t*)devices.next;
 
if(from != NULL)
{
int err;
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( &dev->pci_dev == from)
{
dev = (pci_dev_t*)dev->link.next;
break;
};
}
};
 
if ((err = pcibios_enable_resources(dev, mask)) < 0)
return err;
for(; &dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( dev->pci_dev.class == class)
{
return &dev->pci_dev;
}
}
 
// if (!dev->msi_enabled)
// return pcibios_enable_irq(dev);
return 0;
return NULL;
}
 
 
static int do_pci_enable_device(struct pci_dev *dev, int bars)
#define PIO_OFFSET 0x10000UL
#define PIO_MASK 0x0ffffUL
#define PIO_RESERVED 0x40000UL
 
#define IO_COND(addr, is_pio, is_mmio) do { \
unsigned long port = (unsigned long __force)addr; \
if (port >= PIO_RESERVED) { \
is_mmio; \
} else if (port > PIO_OFFSET) { \
port &= PIO_MASK; \
is_pio; \
}; \
} while (0)
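/* Editor's note: IO_COND dispatches on the cookie value itself: anything
 * at or above PIO_RESERVED (0x40000) is treated as a real MMIO virtual
 * address, while smaller cookies above PIO_OFFSET carry an I/O port number
 * in their low PIO_MASK bits. pci_iomap() below hands out both kinds, and
 * pci_iounmap() relies on this macro to iounmap() only the MMIO case.
 */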
 
/* Create a virtual mapping cookie for an IO port range */
void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
int err;
return (void __iomem *) port;
}
 
// err = pci_set_power_state(dev, PCI_D0);
// if (err < 0 && err != -EIO)
// return err;
err = pcibios_enable_device(dev, bars);
// if (err < 0)
// return err;
// pci_fixup_device(pci_fixup_enable, dev);
void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
{
resource_size_t start = pci_resource_start(dev, bar);
resource_size_t len = pci_resource_len(dev, bar);
unsigned long flags = pci_resource_flags(dev, bar);
 
return 0;
if (!len || !start)
return NULL;
if (maxlen && len > maxlen)
len = maxlen;
if (flags & IORESOURCE_IO)
return ioport_map(start, len);
if (flags & IORESOURCE_MEM) {
return ioremap(start, len);
}
/* What? */
return NULL;
}
 
 
static int __pci_enable_device_flags(struct pci_dev *dev,
resource_size_t flags)
void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
{
int err;
int i, bars = 0;
IO_COND(addr, /* nothing */, iounmap(addr));
}
 
// if (atomic_add_return(1, &dev->enable_cnt) > 1)
// return 0; /* already enabled */
 
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
struct pci_bus_region {
resource_size_t start;
resource_size_t end;
};
 
err = do_pci_enable_device(dev, bars);
// if (err < 0)
// atomic_dec(&dev->enable_cnt);
return err;
static inline void
pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
struct resource *res)
{
region->start = res->start;
region->end = res->end;
}
 
static inline int pci_read_config_dword(struct pci_dev *dev, int where,
u32 *val)
{
*val = PciRead32(dev->busnr, dev->devfn, where);
return 1;
}
 
/**
* pci_enable_device - Initialize device before it's used by a driver.
* @dev: PCI device to be initialized
*
* Initialize device before it's used by a driver. Ask low-level code
* to enable I/O and memory. Wake up the device if it was suspended.
* Beware, this function can fail.
*
* Note we don't actually enable the device many times if we call
* this function repeatedly (we just increment the count).
*/
int pci_enable_device(struct pci_dev *dev)
static inline int pci_write_config_dword(struct pci_dev *dev, int where,
u32 val)
{
return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
PciWrite32(dev->busnr, dev->devfn, where, val);
return 1;
}
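
/* Sketch of the cookie convention IO_COND() implements: values below
 * PIO_RESERVED are treated as I/O-port cookies, everything at or above
 * it as a real MMIO pointer. The helper below is illustrative only;
 * note that with this file's ioport_map() a cookie at or below
 * PIO_OFFSET matches neither branch and leaves the result at 0. */
static int addr_is_pio(void __iomem *addr)
{
int pio = 0;

IO_COND(addr, pio = 1, pio = 0);
return pio;
}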
 
static inline int pci_read_config_word(struct pci_dev *dev, int where,
u16 *val)
{
*val = PciRead16(dev->busnr, dev->devfn, where);
return 1;
}
 
static inline int pci_write_config_word(struct pci_dev *dev, int where,
u16 val)
{
PciWrite16(dev->busnr, dev->devfn, where, val);
return 1;
}
 
struct pci_device_id* find_pci_device(pci_dev_t* pdev, struct pci_device_id *idlist)
{
pci_dev_t *dev;
struct pci_device_id *ent;

for(dev = (pci_dev_t*)devices.next;
&dev->link != &devices;
dev = (pci_dev_t*)dev->link.next)
{
if( dev->pci_dev.vendor != idlist->vendor )
continue;

for(ent = idlist; ent->vendor != 0; ent++)
{
if(unlikely(ent->device == dev->pci_dev.device))
{
pdev->pci_dev = dev->pci_dev;
return ent;
}
};
};

return NULL;
};

int pci_enable_rom(struct pci_dev *pdev)
{
struct resource *res = pdev->resource + PCI_ROM_RESOURCE;
struct pci_bus_region region;
u32 rom_addr;

if (!res->flags)
return -1;

pcibios_resource_to_bus(pdev, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
rom_addr |= region.start | PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
return 0;
}

void pci_disable_rom(struct pci_dev *pdev)
{
u32 rom_addr;
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_ENABLE;
pci_write_config_dword(pdev, pdev->rom_base_reg, rom_addr);
}

/**
 * pci_get_rom_size - obtain the actual size of the ROM image
 * @pdev: target PCI device
 * @rom: kernel virtual pointer to image of ROM
 * @size: size of PCI window
 * return: size of actual ROM image
 *
 * Determine the actual length of the ROM image.
 * The PCI window size could be much larger than the
 * actual image size.
 */
size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
{
void __iomem *image;
int last_image;

image = rom;
do {
void __iomem *pds;
/* Standard PCI ROMs start out with these bytes 55 AA */
if (readb(image) != 0x55) {
dev_err(&pdev->dev, "Invalid ROM contents\n");
break;
}
if (readb(image + 1) != 0xAA)
break;
/* get the PCI data structure and check its signature */
pds = image + readw(image + 24);
if (readb(pds) != 'P')
break;
if (readb(pds + 1) != 'C')
break;
if (readb(pds + 2) != 'I')
break;
if (readb(pds + 3) != 'R')
break;
last_image = readb(pds + 21) & 0x80;
/* this length is reliable */
image += readw(pds + 16) * 512;
} while (!last_image);

/* never return a size larger than the PCI resource window */
/* there are known ROMs that get the size wrong */
return min((size_t)(image - rom), size);
}
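
/* Worked example for pci_get_rom_size(): if the PCIR image-length word
 * at pds + 16 reads 0x0040, that image spans 0x40 * 512 = 32 KiB; when
 * the indicator byte at pds + 21 has bit 7 set it is also the last
 * image, and the result is min(image - rom, window size). */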
 
 
 
/**
 * pci_map_rom - map a PCI ROM to kernel space
 * @pdev: pointer to pci device struct
 * @size: pointer to receive size of pci window over ROM
 *
 * Return: kernel virtual pointer to image of ROM
 *
 * Map a PCI ROM into kernel space. If ROM is boot video ROM,
 * the shadow BIOS copy will be returned instead of the
 * actual ROM.
 */

#define legacyBIOSLocation 0xC0000
#define OS_BASE 0x80000000

void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
loff_t start;
void __iomem *rom;

// ENTER();

// dbgprintf("resource start %x end %x flags %x\n",
// res->start, res->end, res->flags);

/*
 * IORESOURCE_ROM_SHADOW set on x86, x86_64 and IA64 supports legacy
 * memory map if the VGA enable bit of the Bridge Control register is
 * set for embedded VGA.
 */

start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */

#if 0
if (res->flags & IORESOURCE_ROM_SHADOW) {
/* primary video rom always starts here */
start = (loff_t)0xC0000;
*size = 0x20000; /* cover C000:0 through E000:0 */
} else {
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY)) {
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
return (void __iomem *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
/* assign the ROM an address if it doesn't have one */
//if (res->parent == NULL &&
// pci_assign_resource(pdev,PCI_ROM_RESOURCE))
// return NULL;
start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
*size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
if (*size == 0)
return NULL;

/* Enable ROM space decodes */
if (pci_enable_rom(pdev))
return NULL;
}
}
#endif

rom = ioremap(start, *size);
if (!rom) {
840,37 → 850,237
* size is much larger than the actual size of the ROM.
* True size is important if the ROM is going to be copied.
*/
*size = pci_get_rom_size(pdev, rom, *size);
// LEAVE();
return rom;
}

void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];

if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
return;

iounmap(rom);

/* Disable again before continuing, leave enabled if pci=rom */
if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
pci_disable_rom(pdev);
}

int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
dev->dma_mask = mask;

return 0;
}

static void __pci_set_master(struct pci_dev *dev, bool enable)
{
u16 old_cmd, cmd;

pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
if (enable)
cmd = old_cmd | PCI_COMMAND_MASTER;
else
cmd = old_cmd & ~PCI_COMMAND_MASTER;
if (cmd != old_cmd) {
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
dev->is_busmaster = enable;
}
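
/* Illustrative sketch, not part of this file: the usual map/copy/unmap
 * pattern built from the helpers above (radeon caches the video BIOS
 * this way). The kmalloc()/GFP_KERNEL allocator and the direct memcpy
 * from the mapping are assumptions of this example. */
static void *rom_snapshot(struct pci_dev *pdev, size_t *len)
{
size_t size;
void __iomem *rom;
void *copy;

*len = 0;
rom = pci_map_rom(pdev, &size);
if (rom == NULL)
return NULL;

copy = kmalloc(size, GFP_KERNEL);
if (copy != NULL) {
memcpy(copy, (void __force *)rom, size);
*len = size;
}

pci_unmap_rom(pdev, rom);
return copy;
}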
 
/* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
*
* Enables bus-mastering on the device and calls pcibios_set_master()
* to do the needed arch specific settings.
*/
void pci_set_master(struct pci_dev *dev)
{
__pci_set_master(dev, true);
// pcibios_set_master(dev);
}
 
/**
* pci_clear_master - disables bus-mastering for device dev
* @dev: the PCI device to disable
*/
void pci_clear_master(struct pci_dev *dev)
{
__pci_set_master(dev, false);
}
 
static inline int pcie_cap_version(const struct pci_dev *dev)
{
return dev->pcie_flags_reg & PCI_EXP_FLAGS_VERS;
}

static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
{
return true;
}
 
static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_ENDPOINT ||
type == PCI_EXP_TYPE_LEG_END;
}
 
static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
(type == PCI_EXP_TYPE_DOWNSTREAM &&
dev->pcie_flags_reg & PCI_EXP_FLAGS_SLOT);
}
 
static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
int type = pci_pcie_type(dev);
 
return pcie_cap_version(dev) > 1 ||
type == PCI_EXP_TYPE_ROOT_PORT ||
type == PCI_EXP_TYPE_RC_EC;
}
 
static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
if (!pci_is_pcie(dev))
return false;
 
switch (pos) {
case PCI_EXP_FLAGS_TYPE:
return true;
case PCI_EXP_DEVCAP:
case PCI_EXP_DEVCTL:
case PCI_EXP_DEVSTA:
return pcie_cap_has_devctl(dev);
case PCI_EXP_LNKCAP:
case PCI_EXP_LNKCTL:
case PCI_EXP_LNKSTA:
return pcie_cap_has_lnkctl(dev);
case PCI_EXP_SLTCAP:
case PCI_EXP_SLTCTL:
case PCI_EXP_SLTSTA:
return pcie_cap_has_sltctl(dev);
case PCI_EXP_RTCTL:
case PCI_EXP_RTCAP:
case PCI_EXP_RTSTA:
return pcie_cap_has_rtctl(dev);
case PCI_EXP_DEVCAP2:
case PCI_EXP_DEVCTL2:
case PCI_EXP_LNKCAP2:
case PCI_EXP_LNKCTL2:
case PCI_EXP_LNKSTA2:
return pcie_cap_version(dev) > 1;
default:
return false;
}
}
 
/*
* Note that these accessor functions are only for the "PCI Express
* Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the
* other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
*/
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
int ret;
 
*val = 0;
if (pos & 1)
return -EINVAL;
 
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_word() fails, it may
* have been written as 0xFFFF if hardware error happens
* during pci_read_config_word().
*/
if (ret)
*val = 0;
return ret;
}
 
/*
* For Functions that do not implement the Slot Capabilities,
* Slot Status, and Slot Control registers, these spaces must
* be hardwired to 0b, with the exception of the Presence Detect
* State bit in the Slot Status register of Downstream Ports,
* which must be hardwired to 1b. (PCIe Base Spec 3.0, sec 7.8)
*/
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
 
return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);
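
/* Example use of the accessor above: reading the negotiated link width
 * from PCI_EXP_LNKSTA. The NLW field mask and its shift of 4 are the
 * standard PCIe definitions; the helper itself is only an illustration
 * and not part of this file. */
static unsigned int pcie_negotiated_width(struct pci_dev *dev)
{
u16 lnksta = 0;

pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
return (lnksta & PCI_EXP_LNKSTA_NLW) >> 4;
}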
 
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
int ret;
 
*val = 0;
if (pos & 3)
return -EINVAL;
 
if (pcie_capability_reg_implemented(dev, pos)) {
ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
/*
* Reset *val to 0 if pci_read_config_dword() fails, it may
* have been written as 0xFFFFFFFF if hardware error happens
* during pci_read_config_dword().
*/
if (ret)
*val = 0;
return ret;
}
 
if (pci_is_pcie(dev) && pos == PCI_EXP_SLTCTL &&
pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) {
*val = PCI_EXP_SLTSTA_PDS;
}
 
return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);
 
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
if (pos & 1)
return -EINVAL;
 
if (!pcie_capability_reg_implemented(dev, pos))
return 0;
 
return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);
 
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
if (pos & 3)
return -EINVAL;
 
if (!pcie_capability_reg_implemented(dev, pos))
return 0;
 
return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);
 
/drivers/video/drm/radeon/r100.c
27,9 → 27,8
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
40,6 → 39,7
#include "atom.h"
 
#include <linux/firmware.h>
#include <linux/module.h>
 
#include "r100_reg_safe.h"
#include "rn50_reg_safe.h"
64,12 → 64,57
 
/* This file gathers functions specific to:
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
* and others in some cases.
*/
 
/**
* r100_wait_for_vblank - vblank wait asic callback.
*
* @rdev: radeon_device pointer
* @crtc: crtc to wait for vblank on
*
* Wait for vblank on the requested crtc (r1xx-r4xx).
*/
void r100_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
int i;
 
if (crtc >= rdev->num_crtc)
return;
 
if (crtc == 0) {
if (RREG32(RADEON_CRTC_GEN_CNTL) & RADEON_CRTC_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC_STATUS) & RADEON_CRTC_VBLANK_CUR)
break;
udelay(1);
}
}
} else {
if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC2_STATUS) & RADEON_CRTC2_VBLANK_CUR)
break;
udelay(1);
}
}
}
}
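
/* Both CRTC paths above poll in two phases: first until the current
 * vblank (if any) ends, then until the next one begins, so the caller
 * always returns on a fresh vblank edge instead of mid-interval. */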
u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
int i;
 
/* Lock the graphics update lock */
/* update the scanout addresses */
76,7 → 121,11
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
/* Wait for update_pending to go high. */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
break;
udelay(1);
}
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
/* Unlock the lock, so double-buffering can take place inside vblank */
95,6 → 144,15
}
 
/* hpd for digital panel detect/disconnect */
/**
* r100_hpd_sense - hpd sense callback.
*
* @rdev: radeon_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Checks if a digital monitor is connected (r1xx-r4xx).
* Returns true if connected, false if not connected.
*/
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
bool connected = false;
114,6 → 172,14
return connected;
}
 
/**
* r100_hpd_set_polarity - hpd set polarity callback.
*
* @rdev: radeon_device pointer
* @hpd: hpd (hotplug detect) pin
*
* Set the polarity of the hpd pin (r1xx-r4xx).
*/
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd)
{
142,47 → 208,48
}
}
 
/**
* r100_hpd_init - hpd setup callback.
*
* @rdev: radeon_device pointer
*
* Setup the hpd pins used by the card (r1xx-r4xx).
* Set the polarity, and enable the hpd interrupts.
*/
void r100_hpd_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned enable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
// radeon_irq_kms_enable_hpd(rdev, enable);
}
 
/**
* r100_hpd_fini - hpd tear down callback.
*
* @rdev: radeon_device pointer
*
* Tear down the hpd pins used by the card (r1xx-r4xx).
* Disable the hpd interrupts.
*/
void r100_hpd_fini(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned disable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
disable |= 1 << radeon_connector->hpd.hpd;
}
// radeon_irq_kms_disable_hpd(rdev, disable);
}
 
/*
* PCI GART
199,7 → 266,7
{
int r;
 
if (rdev->gart.table.ram.ptr) {
if (rdev->gart.ptr) {
WARN(1, "R100 PCI GART already initialized\n");
return 0;
}
208,20 → 275,11
if (r)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
 
int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
238,6 → 296,9
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
WREG32(RADEON_AIC_CNTL, tmp);
r100_pci_gart_tlb_flush(rdev);
DRM_INFO("PCI GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
255,10 → 316,12
 
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
u32 *gtt = rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
}
gtt[i] = cpu_to_le32(lower_32_bits(addr));
return 0;
}
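
/* Sketch only: how a contiguous buffer would be entered into the table
 * above, one little-endian dword per GPU page. The helper name, base
 * address, start slot and page count are hypothetical. */
static int r100_gart_map_linear(struct radeon_device *rdev,
uint64_t base, int first_page, int count)
{
int i, r;

for (i = 0; i < count; i++) {
r = r100_pci_gart_set_page(rdev, first_page + i,
base + (uint64_t)i * RADEON_GPU_PAGE_SIZE);
if (r)
return r;
}
r100_pci_gart_tlb_flush(rdev); /* make the GPU reread the entries */
return 0;
}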
 
278,18 → 341,15
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
tmp |= RADEON_CRTC_VBLANK_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
atomic_read(&rdev->irq.pflip[1])) {
tmp |= RADEON_CRTC2_VBLANK_MASK;
}
if (rdev->irq.hpd[0]) {
313,7 → 373,7
WREG32(R_000044_GEN_INT_STATUS, tmp);
}
 
static uint32_t r100_irq_ack(struct radeon_device *rdev)
{
uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
uint32_t irq_mask = RADEON_SW_INT_TEST |
320,12 → 380,6
RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
 
if (irqs) {
WREG32(RADEON_GEN_INT_STATUS, irqs);
}
337,9 → 391,6
uint32_t status, msi_rearm;
bool queue_hotplug = false;
 
status = r100_irq_ack(rdev);
if (!status) {
return IRQ_NONE;
350,14 → 401,8
while (status) {
/* SW interrupt */
if (status & RADEON_SW_INT_TEST) {
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
if (rdev->irq.crtc_vblank_int[0]) {
387,8 → 432,6
}
status = r100_irq_ack(rdev);
}
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
if (rdev->msi_enabled) {
400,9 → 443,7
WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
break;
default:
WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
break;
}
}
422,35 → 463,47
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
 
/* We have to make sure that caches are flushed before
* CPU might read something from VRAM. */
radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
 
void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
/* Unused on older asics, since we don't have semaphores or multiple rings */
BUG();
}
 
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
uint32_t stride_pixels;
unsigned ndw;
462,26 → 515,26
/* radeon pitch is /64 */
pitch = stride_bytes / 64;
stride_pixels = stride_bytes / 4;
num_loops = DIV_ROUND_UP(num_gpu_pages, 8191);
 
/* Ask for enough room for blit + flush + fence */
ndw = 64 + (10 * num_loops);
r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return -EINVAL;
}
while (num_gpu_pages > 0) {
cur_pages = num_gpu_pages;
if (cur_pages > 8191) {
cur_pages = 8191;
}
num_gpu_pages -= cur_pages;
 
/* pages are in Y direction - height
page width in X direction - width */
radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
radeon_ring_write(ring,
RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_SRC_CLIPPING |
493,26 → 546,26
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
radeon_ring_write(ring, num_gpu_pages);
radeon_ring_write(ring, num_gpu_pages);
radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
}
radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev, ring);
return r;
}
 
531,21 → 584,21
return -1;
}
 
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
 
r = radeon_ring_lock(rdev, ring, 2);
if (r) {
return;
}
radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_unlock_commit(rdev, ring);
}
 
 
646,6 → 699,7
 
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned rb_bufsz;
unsigned rb_blksz;
unsigned max_fetch;
671,7 → 725,9
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
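/* Worked example, assuming drm_order() returns the ceiling log2: a
 * 1 MiB request gives rb_bufsz = drm_order(1048576 / 8) = 17 and
 * ring_size = (1 << 18) * 4 = 1 MiB again, while a 1.5 MiB request
 * rounds up to rb_bufsz = 18, i.e. a 2 MiB ring. */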
r100_cp_load_microcode(rdev);
r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
0, 0x7fffff, RADEON_CP_PACKET2);
if (r) {
return r;
}
680,7 → 736,7
rb_blksz = 9;
/* cp will read 128bytes at a time (4 dwords) */
max_fetch = 1;
ring->align_mask = 16 - 1;
/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
pre_write_timer = 64;
/* Force CP_RB_WPTR write if written more than one time before the
710,12 → 766,13
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
 
/* Set ring address */
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
ring->wptr = 0;
WREG32(RADEON_CP_RB_WPTR, ring->wptr);
 
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
731,10 → 788,7
 
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
ring->rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
742,13 → 796,13
WREG32(RADEON_CP_RB_WPTR_DELAY, 0);
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
radeon_ring_start(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
DRM_ERROR("radeon: cp isn't working (%d).\n", r);
return r;
}
ring->ready = true;
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
760,7 → 814,7
}
/* Disable ring */
r100_cp_disable(rdev);
radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
DRM_INFO("radeon: cp finalized\n");
}
 
768,7 → 822,7
{
/* Disable ring */
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
778,17 → 832,116
}
}
 
#if 0
/*
* CS functions
*/
int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx,
unsigned reg)
{
int r;
u32 tile_flags = 0;
u32 tmp;
struct radeon_cs_reloc *reloc;
u32 value;
 
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
idx, reg);
r100_cs_dump_packet(p, pkt);
return r;
}
 
value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff;
tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
 
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_DST_TILE_MACRO;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
if (reg == RADEON_SRC_PITCH_OFFSET) {
DRM_ERROR("Cannot src blit from microtiled surface\n");
r100_cs_dump_packet(p, pkt);
return -EINVAL;
}
tile_flags |= RADEON_DST_TILE_MICRO;
}
 
tmp |= tile_flags;
p->ib.ptr[idx] = (value & 0x3fc00000) | tmp;
} else
p->ib.ptr[idx] = (value & 0xffc00000) | tmp;
return 0;
}
 
int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
int idx)
{
unsigned c, i;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
u32 idx_value;
 
ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
if (c > 16) {
DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return -EINVAL;
}
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
 
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = idx_value >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].esize &= 0x7F;
}
return r;
}
 
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
const unsigned *auth, unsigned n,
841,7 → 994,7
unsigned i;
unsigned idx;
 
ib = p->ib.ptr;
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
920,7 → 1073,7
uint32_t header, h_idx, reg;
volatile uint32_t *ib;
 
ib = p->ib.ptr;
 
/* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx);
1099,7 → 1252,7
u32 tile_flags = 0;
u32 idx_value;
 
ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
 
idx_value = radeon_get_ib_value(p, idx);
1159,6 → 1312,16
r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_TXO_MICRO_TILE_X2;
 
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
1230,7 → 1393,7
r100_cs_dump_packet(p, pkt);
return r;
}
 
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1239,6 → 1402,8
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
} else
ib[idx] = idx_value;
 
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
1443,7 → 1608,7
volatile uint32_t *ib;
int r;
 
ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch (pkt->opcode) {
1562,6 → 1727,8
int r;
 
track = kzalloc(sizeof(*track), GFP_KERNEL);
if (!track)
return -ENOMEM;
r100_cs_track_clear(p->rdev, track);
p->track = track;
do {
1600,72 → 1767,401
return 0;
}
 
static void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
{
DRM_ERROR("pitch %d\n", t->pitch);
DRM_ERROR("use_pitch %d\n", t->use_pitch);
DRM_ERROR("width %d\n", t->width);
DRM_ERROR("width_11 %d\n", t->width_11);
DRM_ERROR("height %d\n", t->height);
DRM_ERROR("height_11 %d\n", t->height_11);
DRM_ERROR("num levels %d\n", t->num_levels);
DRM_ERROR("depth %d\n", t->txdepth);
DRM_ERROR("bpp %d\n", t->cpp);
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
DRM_ERROR("compress format %d\n", t->compress_format);
}
 
static int r100_track_compress_size(int compress_format, int w, int h)
{
int block_width, block_height, block_bytes;
int wblocks, hblocks;
int min_wblocks;
int sz;

block_width = 4;
block_height = 4;

switch (compress_format) {
case R100_TRACK_COMP_DXT1:
block_bytes = 8;
min_wblocks = 4;
break;
default:
case R100_TRACK_COMP_DXT35:
block_bytes = 16;
min_wblocks = 2;
break;
}

hblocks = (h + block_height - 1) / block_height;
wblocks = (w + block_width - 1) / block_width;
if (wblocks < min_wblocks)
wblocks = min_wblocks;
sz = wblocks * hblocks * block_bytes;
return sz;
}
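
/* Worked example for the helper above: a 64x64 DXT1 mip level is
 * 16 * 16 blocks * 8 bytes = 2048 bytes, while a 2x2 DXT1 level still
 * pays the 4-block minimum row width: 4 * 1 * 8 = 32 bytes. */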
 
static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
struct radeon_bo *cube_robj;
unsigned long size;
unsigned compress_format = track->textures[idx].compress_format;
 
for (face = 0; face < 5; face++) {
cube_robj = track->textures[idx].cube_info[face].robj;
w = track->textures[idx].cube_info[face].width;
h = track->textures[idx].cube_info[face].height;
 
if (compress_format) {
size = r100_track_compress_size(compress_format, w, h);
} else
size = w * h;
size *= track->textures[idx].cpp;
 
size += track->textures[idx].cube_info[face].offset;
 
if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
}
return 0;
}
 
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h, d;
int ret;

for (u = 0; u < track->num_texture; u++) {
if (!track->textures[u].enabled)
continue;
if (track->textures[u].lookup_disable)
continue;
robj = track->textures[u].robj;
if (robj == NULL) {
DRM_ERROR("No texture bound to unit %u\n", u);
return -EINVAL;
}
size = 0;
for (i = 0; i <= track->textures[u].num_levels; i++) {
if (track->textures[u].use_pitch) {
if (rdev->family < CHIP_R300)
w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
else
w = track->textures[u].pitch / (1 << i);
} else {
w = track->textures[u].width;
if (rdev->family >= CHIP_RV515)
w |= track->textures[u].width_11;
w = w / (1 << i);
if (track->textures[u].roundup_w)
w = roundup_pow_of_two(w);
}
h = track->textures[u].height;
if (rdev->family >= CHIP_RV515)
h |= track->textures[u].height_11;
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
if (track->textures[u].tex_coord_type == 1) {
d = (1 << track->textures[u].txdepth) / (1 << i);
if (!d)
d = 1;
} else {
d = 1;
}
if (track->textures[u].compress_format) {

size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
/* compressed textures are block based */
} else
size += w * h * d;
}
size *= track->textures[u].cpp;

switch (track->textures[u].tex_coord_type) {
case 0:
case 1:
break;
case 2:
if (track->separate_cube) {
ret = r100_cs_track_cube(rdev, track, u);
if (ret)
return ret;
} else
size *= 6;
break;
default:
DRM_ERROR("Invalid texture coordinate type %u for unit "
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
"%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
}
return 0;
}

int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i;
unsigned long size;
unsigned prim_walk;
unsigned nverts;
unsigned num_cb = track->cb_dirty ? track->num_cb : 0;

if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
!track->blend_read_enable)
num_cb = 0;

for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
return -EINVAL;
}
}
track->cb_dirty = false;

if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
DRM_ERROR("[drm] No buffer for z buffer !\n");
return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
return -EINVAL;
}
}
track->zb_dirty = false;
 
if (track->aa_dirty && track->aaresolve) {
if (track->aa.robj == NULL) {
DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
return -EINVAL;
}
/* I believe the format comes from colorbuffer0. */
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
size += track->aa.offset;
if (size > radeon_bo_size(track->aa.robj)) {
DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->aa.robj));
DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
i, track->aa.pitch, track->cb[0].cpp,
track->aa.offset, track->maxy);
return -EINVAL;
}
}
track->aa_dirty = false;
 
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
if (track->vap_vf_cntl & (1 << 14)) {
nverts = track->vap_alt_nverts;
} else {
nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
}
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
}
break;
case 2:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
return -EINVAL;
}
}
break;
case 3:
size = track->vtx_size * nverts;
if (size != track->immd_dwords) {
DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
track->immd_dwords, size);
DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
nverts, track->vtx_size);
return -EINVAL;
}
break;
default:
DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
prim_walk);
return -EINVAL;
}
 
if (track->tex_dirty) {
track->tex_dirty = false;
return r100_cs_track_texture_check(rdev, track);
}
return 0;
}
 
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i, face;
 
track->cb_dirty = true;
track->zb_dirty = true;
track->tex_dirty = true;
track->aa_dirty = true;
 
if (rdev->family < CHIP_R300) {
track->num_cb = 1;
if (rdev->family <= CHIP_RS200)
track->num_texture = 3;
else
track->num_texture = 6;
track->maxy = 2048;
track->separate_cube = 1;
} else {
track->num_cb = 4;
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
track->aaresolve = false;
track->aa.robj = NULL;
}
 
for (i = 0; i < track->num_cb; i++) {
track->cb[i].robj = NULL;
track->cb[i].pitch = 8192;
track->cb[i].cpp = 16;
track->cb[i].offset = 0;
}
track->z_enabled = true;
track->zb.robj = NULL;
track->zb.pitch = 8192;
track->zb.cpp = 4;
track->zb.offset = 0;
track->vtx_size = 0x7F;
track->immd_dwords = 0xFFFFFFFFUL;
track->num_arrays = 11;
track->max_indx = 0x00FFFFFFUL;
for (i = 0; i < track->num_arrays; i++) {
track->arrays[i].robj = NULL;
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
track->textures[i].width_11 = 1 << 11;
track->textures[i].height_11 = 1 << 11;
track->textures[i].num_levels = 12;
if (rdev->family <= CHIP_RS200) {
track->textures[i].tex_coord_type = 0;
track->textures[i].txdepth = 0;
} else {
track->textures[i].txdepth = 16;
track->textures[i].tex_coord_type = 1;
}
track->textures[i].cpp = 64;
track->textures[i].robj = NULL;
/* CS IB emission code makes sure texture unit are disabled */
track->textures[i].enabled = false;
track->textures[i].lookup_disable = false;
track->textures[i].roundup_w = true;
track->textures[i].roundup_h = true;
if (track->separate_cube)
for (face = 0; face < 5; face++) {
track->textures[i].cube_info[face].robj = NULL;
track->textures[i].cube_info[face].width = 16536;
track->textures[i].cube_info[face].height = 16536;
track->textures[i].cube_info[face].offset = 0;
}
}
}
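
/* Note: the reset values above look deliberately pessimistic (maximal
 * pitch, cpp, index count and 16536-wide textures), so any state the
 * parser did not explicitly see over-estimates buffer usage and fails
 * the size checks instead of letting an under-sized BO through. */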
#endif
 
/*
* Global GPU functions
*/
static void r100_errata(struct radeon_device *rdev)
{
rdev->pll_errata = 0;
 
if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
}
 
if (rdev->family == CHIP_RV100 ||
rdev->family == CHIP_RS100 ||
rdev->family == CHIP_RS200) {
rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
}
}
 
static int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
{
unsigned i;
uint32_t tmp;
 
1714,79 → 2210,27
return -1;
}
 
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;

rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
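/* Calling pattern: fence waiters invoke this after a timeout. Forcing
 * ring activity first means a healthy CP visibly advances its read
 * pointer, so radeon_ring_test_lockup() only reports a lockup when the
 * rptr stays frozen across the tracked interval. */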
 
/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
void r100_enable_bm(struct radeon_device *rdev)
{
uint32_t tmp;
/* Enable bus mastering */
tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
WREG32(RADEON_BUS_CNTL, tmp);
}
 
void r100_bm_disable(struct radeon_device *rdev)
1802,8 → 2246,7
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
pci_clear_master(rdev->pdev);
mdelay(1);
}
 
1856,7 → 2299,6
if (G_000E40_SE_BUSY(status) || G_000E40_RE_BUSY(status) ||
G_000E40_TAM_BUSY(status) || G_000E40_PB_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
2079,7 → 2521,7
WREG32(RADEON_CONFIG_CNTL, temp);
}
 
static void r100_mc_init(struct radeon_device *rdev)
{
u64 base;
 
2113,7 → 2555,7
* or the chip could hang on a subsequent access
*/
if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
mdelay(5);
}
 
/* This function is required to workaround a hardware bug in some (all?)
2151,7 → 2593,7
r100_pll_errata_after_data(rdev);
}
 
static void r100_set_safe_registers(struct radeon_device *rdev)
{
if (ASIC_IS_RN50(rdev)) {
rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
2194,21 → 2636,22
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t rdp, wdp;
unsigned count, i, j;
 
radeon_ring_free_size(rdev, ring);
rdp = RREG32(RADEON_CP_RB_RPTR);
wdp = RREG32(RADEON_CP_RB_WPTR);
count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
for (j = 0; j <= count; j++) {
i = (rdp + j) & ring->ptr_mask;
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
}
return 0;
}
2876,384 → 3319,8
}
}
 
#if 0
static inline void r100_cs_track_texture_print(struct r100_cs_track_texture *t)
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
DRM_ERROR("pitch %d\n", t->pitch);
DRM_ERROR("use_pitch %d\n", t->use_pitch);
DRM_ERROR("width %d\n", t->width);
DRM_ERROR("width_11 %d\n", t->width_11);
DRM_ERROR("height %d\n", t->height);
DRM_ERROR("height_11 %d\n", t->height_11);
DRM_ERROR("num levels %d\n", t->num_levels);
DRM_ERROR("depth %d\n", t->txdepth);
DRM_ERROR("bpp %d\n", t->cpp);
DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
DRM_ERROR("compress format %d\n", t->compress_format);
}
 
static int r100_track_compress_size(int compress_format, int w, int h)
{
int block_width, block_height, block_bytes;
int wblocks, hblocks;
int min_wblocks;
int sz;
 
block_width = 4;
block_height = 4;
 
switch (compress_format) {
case R100_TRACK_COMP_DXT1:
block_bytes = 8;
min_wblocks = 4;
break;
default:
case R100_TRACK_COMP_DXT35:
block_bytes = 16;
min_wblocks = 2;
break;
}
 
hblocks = (h + block_height - 1) / block_height;
wblocks = (w + block_width - 1) / block_width;
if (wblocks < min_wblocks)
wblocks = min_wblocks;
sz = wblocks * hblocks * block_bytes;
return sz;
}
 
static int r100_cs_track_cube(struct radeon_device *rdev,
struct r100_cs_track *track, unsigned idx)
{
unsigned face, w, h;
struct radeon_bo *cube_robj;
unsigned long size;
unsigned compress_format = track->textures[idx].compress_format;
 
for (face = 0; face < 5; face++) {
cube_robj = track->textures[idx].cube_info[face].robj;
w = track->textures[idx].cube_info[face].width;
h = track->textures[idx].cube_info[face].height;
 
if (compress_format) {
size = r100_track_compress_size(compress_format, w, h);
} else
size = w * h;
size *= track->textures[idx].cpp;
 
size += track->textures[idx].cube_info[face].offset;
 
if (size > radeon_bo_size(cube_robj)) {
DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
size, radeon_bo_size(cube_robj));
r100_cs_track_texture_print(&track->textures[idx]);
return -1;
}
}
return 0;
}
 
static int r100_cs_track_texture_check(struct radeon_device *rdev,
struct r100_cs_track *track)
{
struct radeon_bo *robj;
unsigned long size;
unsigned u, i, w, h, d;
int ret;
 
for (u = 0; u < track->num_texture; u++) {
if (!track->textures[u].enabled)
continue;
if (track->textures[u].lookup_disable)
continue;
robj = track->textures[u].robj;
if (robj == NULL) {
DRM_ERROR("No texture bound to unit %u\n", u);
return -EINVAL;
}
size = 0;
for (i = 0; i <= track->textures[u].num_levels; i++) {
if (track->textures[u].use_pitch) {
if (rdev->family < CHIP_R300)
w = (track->textures[u].pitch / track->textures[u].cpp) / (1 << i);
else
w = track->textures[u].pitch / (1 << i);
} else {
w = track->textures[u].width;
if (rdev->family >= CHIP_RV515)
w |= track->textures[u].width_11;
w = w / (1 << i);
if (track->textures[u].roundup_w)
w = roundup_pow_of_two(w);
}
h = track->textures[u].height;
if (rdev->family >= CHIP_RV515)
h |= track->textures[u].height_11;
h = h / (1 << i);
if (track->textures[u].roundup_h)
h = roundup_pow_of_two(h);
if (track->textures[u].tex_coord_type == 1) {
d = (1 << track->textures[u].txdepth) / (1 << i);
if (!d)
d = 1;
} else {
d = 1;
}
if (track->textures[u].compress_format) {
 
size += r100_track_compress_size(track->textures[u].compress_format, w, h) * d;
/* compressed textures are block based */
} else
size += w * h * d;
}
size *= track->textures[u].cpp;
 
switch (track->textures[u].tex_coord_type) {
case 0:
case 1:
break;
case 2:
if (track->separate_cube) {
ret = r100_cs_track_cube(rdev, track, u);
if (ret)
return ret;
} else
size *= 6;
break;
default:
DRM_ERROR("Invalid texture coordinate type %u for unit "
"%u\n", track->textures[u].tex_coord_type, u);
return -EINVAL;
}
if (size > radeon_bo_size(robj)) {
DRM_ERROR("Texture of unit %u needs %lu bytes but is "
"%lu\n", u, size, radeon_bo_size(robj));
r100_cs_track_texture_print(&track->textures[u]);
return -EINVAL;
}
}
return 0;
}
 
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i;
unsigned long size;
unsigned prim_walk;
unsigned nverts;
unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
 
if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
!track->blend_read_enable)
num_cb = 0;
 
for (i = 0; i < num_cb; i++) {
if (track->cb[i].robj == NULL) {
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
return -EINVAL;
}
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
size += track->cb[i].offset;
if (size > radeon_bo_size(track->cb[i].robj)) {
DRM_ERROR("[drm] Buffer too small for color buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->cb[i].robj));
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
i, track->cb[i].pitch, track->cb[i].cpp,
track->cb[i].offset, track->maxy);
return -EINVAL;
}
}
track->cb_dirty = false;
 
if (track->zb_dirty && track->z_enabled) {
if (track->zb.robj == NULL) {
DRM_ERROR("[drm] No buffer for z buffer !\n");
return -EINVAL;
}
size = track->zb.pitch * track->zb.cpp * track->maxy;
size += track->zb.offset;
if (size > radeon_bo_size(track->zb.robj)) {
DRM_ERROR("[drm] Buffer too small for z buffer "
"(need %lu have %lu) !\n", size,
radeon_bo_size(track->zb.robj));
DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
track->zb.pitch, track->zb.cpp,
track->zb.offset, track->maxy);
return -EINVAL;
}
}
track->zb_dirty = false;
 
if (track->aa_dirty && track->aaresolve) {
if (track->aa.robj == NULL) {
DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
return -EINVAL;
}
/* I believe the format comes from colorbuffer0. */
size = track->aa.pitch * track->cb[0].cpp * track->maxy;
size += track->aa.offset;
if (size > radeon_bo_size(track->aa.robj)) {
DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
"(need %lu have %lu) !\n", i, size,
radeon_bo_size(track->aa.robj));
DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
i, track->aa.pitch, track->cb[0].cpp,
track->aa.offset, track->maxy);
return -EINVAL;
}
}
track->aa_dirty = false;
 
prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
if (track->vap_vf_cntl & (1 << 14)) {
nverts = track->vap_alt_nverts;
} else {
nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
}
switch (prim_walk) {
case 1:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * track->max_indx * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
DRM_ERROR("Max indices %u\n", track->max_indx);
return -EINVAL;
}
}
break;
case 2:
for (i = 0; i < track->num_arrays; i++) {
size = track->arrays[i].esize * (nverts - 1) * 4;
if (track->arrays[i].robj == NULL) {
DRM_ERROR("(PW %u) Vertex array %u no buffer "
"bound\n", prim_walk, i);
return -EINVAL;
}
if (size > radeon_bo_size(track->arrays[i].robj)) {
dev_err(rdev->dev, "(PW %u) Vertex array %u "
"need %lu dwords have %lu dwords\n",
prim_walk, i, size >> 2,
radeon_bo_size(track->arrays[i].robj)
>> 2);
return -EINVAL;
}
}
break;
case 3:
size = track->vtx_size * nverts;
if (size != track->immd_dwords) {
DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
track->immd_dwords, size);
DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
nverts, track->vtx_size);
return -EINVAL;
}
break;
default:
DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
prim_walk);
return -EINVAL;
}
 
if (track->tex_dirty) {
track->tex_dirty = false;
return r100_cs_track_texture_check(rdev, track);
}
return 0;
}
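 
/* Side-by-side sketch of the three primitive-walk bounds checked above
 * (illustrative helper, not part of the driver): */
static unsigned long vertex_bytes_needed(unsigned prim_walk, unsigned esize,
                                         unsigned max_indx, unsigned nverts)
{
        switch (prim_walk) {
        case 1: /* indexed: worst case is the largest index fetched */
                return (unsigned long)esize * max_indx * 4;
        case 2: /* sequential: last vertex sits at (nverts - 1) */
                return (unsigned long)esize * (nverts - 1) * 4;
        default: /* case 3 (immediate) is checked against immd_dwords instead */
                return 0;
        }
}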
 
void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
{
unsigned i, face;
 
track->cb_dirty = true;
track->zb_dirty = true;
track->tex_dirty = true;
track->aa_dirty = true;
 
if (rdev->family < CHIP_R300) {
track->num_cb = 1;
if (rdev->family <= CHIP_RS200)
track->num_texture = 3;
else
track->num_texture = 6;
track->maxy = 2048;
track->separate_cube = 1;
} else {
track->num_cb = 4;
track->num_texture = 16;
track->maxy = 4096;
track->separate_cube = 0;
track->aaresolve = false;
track->aa.robj = NULL;
}
 
for (i = 0; i < track->num_cb; i++) {
track->cb[i].robj = NULL;
track->cb[i].pitch = 8192;
track->cb[i].cpp = 16;
track->cb[i].offset = 0;
}
track->z_enabled = true;
track->zb.robj = NULL;
track->zb.pitch = 8192;
track->zb.cpp = 4;
track->zb.offset = 0;
track->vtx_size = 0x7F;
track->immd_dwords = 0xFFFFFFFFUL;
track->num_arrays = 11;
track->max_indx = 0x00FFFFFFUL;
for (i = 0; i < track->num_arrays; i++) {
track->arrays[i].robj = NULL;
track->arrays[i].esize = 0x7F;
}
for (i = 0; i < track->num_texture; i++) {
track->textures[i].compress_format = R100_TRACK_COMP_NONE;
track->textures[i].pitch = 16536;
track->textures[i].width = 16536;
track->textures[i].height = 16536;
track->textures[i].width_11 = 1 << 11;
track->textures[i].height_11 = 1 << 11;
track->textures[i].num_levels = 12;
if (rdev->family <= CHIP_RS200) {
track->textures[i].tex_coord_type = 0;
track->textures[i].txdepth = 0;
} else {
track->textures[i].txdepth = 16;
track->textures[i].tex_coord_type = 1;
}
track->textures[i].cpp = 64;
track->textures[i].robj = NULL;
/* CS IB emission code makes sure texture units are disabled */
track->textures[i].enabled = false;
track->textures[i].lookup_disable = false;
track->textures[i].roundup_w = true;
track->textures[i].roundup_h = true;
if (track->separate_cube)
for (face = 0; face < 5; face++) {
track->textures[i].cube_info[face].robj = NULL;
track->textures[i].cube_info[face].width = 16536;
track->textures[i].cube_info[face].height = 16536;
track->textures[i].cube_info[face].offset = 0;
}
}
}
#endif
 
int r100_ring_test(struct radeon_device *rdev)
{
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
3265,15 → 3332,15
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ring_lock(rdev, 2);
r = radeon_ring_lock(rdev, ring, 2);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r;
}
radeon_ring_write(rdev, PACKET0(scratch, 0));
radeon_ring_write(rdev, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(ring, PACKET0(scratch, 0));
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
3294,14 → 3361,22
 
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
radeon_ring_write(rdev, ib->gpu_addr);
radeon_ring_write(rdev, ib->length_dw);
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 
if (ring->rptr_save_reg) {
u32 next_rptr = ring->wptr + 2 + 3; /* this write (2 dwords) + IB packet below (3) */
radeon_ring_write(ring, PACKET0(ring->rptr_save_reg, 0));
radeon_ring_write(ring, next_rptr);
}
 
int r100_ib_test(struct radeon_device *rdev)
radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
radeon_ring_write(ring, ib->gpu_addr);
radeon_ring_write(ring, ib->length_dw);
}
 
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib *ib;
struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
3313,28 → 3388,29
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ib_get(rdev, &ib);
r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, NULL, 256);
if (r) {
return r;
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
goto free_scratch;
}
ib->ptr[0] = PACKET0(scratch, 0);
ib->ptr[1] = 0xDEADBEEF;
ib->ptr[2] = PACKET2(0);
ib->ptr[3] = PACKET2(0);
ib->ptr[4] = PACKET2(0);
ib->ptr[5] = PACKET2(0);
ib->ptr[6] = PACKET2(0);
ib->ptr[7] = PACKET2(0);
ib->length_dw = 8;
r = radeon_ib_schedule(rdev, ib);
ib.ptr[0] = PACKET0(scratch, 0);
ib.ptr[1] = 0xDEADBEEF;
ib.ptr[2] = PACKET2(0);
ib.ptr[3] = PACKET2(0);
ib.ptr[4] = PACKET2(0);
ib.ptr[5] = PACKET2(0);
ib.ptr[6] = PACKET2(0);
ib.ptr[7] = PACKET2(0);
ib.length_dw = 8;
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
goto free_ib;
}
r = radeon_fence_wait(ib->fence, false);
r = radeon_fence_wait(ib.fence, false);
if (r) {
return r;
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
goto free_ib;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
3350,41 → 3426,19
scratch, tmp);
r = -EINVAL;
}
free_ib:
radeon_ib_free(rdev, &ib);
free_scratch:
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
 
void r100_ib_fini(struct radeon_device *rdev)
{
radeon_ib_pool_fini(rdev);
}
 
int r100_ib_init(struct radeon_device *rdev)
{
int r;
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
r = r100_ib_test(rdev);
if (r) {
dev_err(rdev->dev, "failed testing IB (%d).\n", r);
r100_ib_fini(rdev);
return r;
}
return 0;
}
 
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shut down the CP. We shouldn't need to, but better safe than
* sorry.
*/
rdev->cp.ready = false;
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(R_000740_CP_CSQ_CNTL, 0);
 
/* Save few CRTC registers */
3454,20 → 3508,6
dev_warn(rdev->dev, "Failed to create r100_mc debugfs file.\n");
}
 
 
int drm_order(unsigned long size)
{
int order;
unsigned long tmp;
 
for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;
 
if (size & (size - 1))
++order;
 
return order;
}
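 
/* Spot-check of the order helper above (my sketch): powers of two map to
 * their exponent, anything else rounds up to the next power of two. */
static void drm_order_examples(void)
{
        WARN_ON(drm_order(1) != 0);
        WARN_ON(drm_order(4096) != 12);
        WARN_ON(drm_order(4097) != 13); /* non-power-of-two rounds up */
}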
 
static void r100_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
3498,7 → 3538,7
r100_mc_resume(rdev, &save);
}
 
void r100_clock_startup(struct radeon_device *rdev)
static void r100_clock_startup(struct radeon_device *rdev)
{
u32 tmp;
 
3545,9 → 3585,10
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
return 0;
3646,6 → 3687,7
return r;
}
r100_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
3657,3 → 3699,43
}
return 0;
}
 
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}
}
 
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
if (reg < rdev->rmmio_size)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}
}
 
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
return ioread32(rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
return ioread32(rdev->rio_mem + RADEON_MM_DATA);
}
}
 
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
if (reg < rdev->rio_mem_size)
iowrite32(v, rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
}
}
/drivers/video/drm/radeon/r200.c
25,9 → 25,8
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
87,9 → 86,10
int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence)
unsigned num_gpu_pages,
struct radeon_fence **fence)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
uint32_t cur_size;
int i, num_loops;
96,16 → 96,16
int r = 0;
 
/* radeon pitch is /64 */
size = num_pages << PAGE_SHIFT;
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
r = radeon_ring_lock(rdev, num_loops * 4 + 64);
r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (1 << 16));
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
112,19 → 112,19
cur_size = 0x1FFFFF;
}
size -= cur_size;
radeon_ring_write(rdev, PACKET0(0x720, 2));
radeon_ring_write(rdev, src_offset);
radeon_ring_write(rdev, dst_offset);
radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
radeon_ring_write(ring, PACKET0(0x720, 2));
radeon_ring_write(ring, src_offset);
radeon_ring_write(ring, dst_offset);
radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
}
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, ring);
return r;
}
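 
/* Usage sketch (my example, assuming the usual 4 KiB RADEON_GPU_PAGE_SIZE):
 * copying 2048 GPU pages = 8 MiB; the loop above splits that into
 * DIV_ROUND_UP(0x800000, 0x1FFFFF) = 5 chunks of at most ~2 MiB each. */
static int r200_copy_dma_example(struct radeon_device *rdev,
                                 uint64_t src, uint64_t dst,
                                 struct radeon_fence **fence)
{
        return r200_copy_dma(rdev, src, dst, 2048, fence);
}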
#if 0
156,7 → 156,7
u32 tile_flags = 0;
u32 idx_value;
 
ib = p->ib->ptr;
ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
217,6 → 217,16
r100_cs_dump_packet(p, pkt);
return r;
}
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R200_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R200_TXO_MICRO_TILE;
 
tmp = idx_value & ~(0x7 << 2);
tmp |= tile_flags;
ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
} else
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
279,6 → 289,7
return r;
}
 
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= RADEON_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
287,6 → 298,8
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
} else
ib[idx] = idx_value;
 
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
track->cb_dirty = true;
/drivers/video/drm/radeon/r300.c
33,7 → 33,7
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include <drm/radeon_drm.h>
 
#include "r300d.h"
#include "rv350d.h"
74,7 → 74,7
 
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
void __iomem *ptr = rdev->gart.ptr;
 
if (i < 0 || i >= rdev->gart.num_gpu_pages) {
return -EINVAL;
93,7 → 93,7
{
int r;
 
if (rdev->gart.table.vram.robj) {
if (rdev->gart.robj) {
WARN(1, "RV370 PCIE GART already initialized\n");
return 0;
}
105,8 → 105,8
if (r)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
 
116,7 → 116,7
uint32_t tmp;
int r;
 
if (rdev->gart.table.vram.robj == NULL) {
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
144,8 → 144,9
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
rv370_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
(unsigned)(rdev->mc.gtt_size >> 20), table_addr);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)table_addr);
rdev->gart.ready = true;
return 0;
}
153,7 → 154,6
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
162,15 → 162,8
tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
radeon_gart_table_vram_unpin(rdev);
}
}
}
 
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
182,36 → 175,38
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
 
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(ring, 0);
/* Flush 3D cache */
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
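 
/* Contract sketch (illustrative): per the comment above, callers bracket the
 * emit with a ring lock sized for the dwords written here; 64 is ample. */
static void r300_emit_fence_locked(struct radeon_device *rdev,
                                   struct radeon_fence *fence)
{
        struct radeon_ring *ring = &rdev->ring[fence->ring];

        if (radeon_ring_lock(rdev, ring, 64))
                return;
        r300_fence_ring_emit(rdev, fence);
        radeon_ring_unlock_commit(rdev, ring);
}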
 
void r300_ring_start(struct radeon_device *rdev)
void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned gb_tile_config;
int r;
234,44 → 229,44
break;
}
 
r = radeon_ring_lock(rdev, 64);
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
radeon_ring_write(rdev, gb_tile_config);
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
radeon_ring_write(ring, gb_tile_config);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
radeon_ring_write(ring,
((6 << R300_MS_X0_SHIFT) |
(6 << R300_MS_Y0_SHIFT) |
(6 << R300_MS_X1_SHIFT) |
280,8 → 275,8
(6 << R300_MS_Y2_SHIFT) |
(6 << R300_MSBD0_Y_SHIFT) |
(6 << R300_MSBD0_X_SHIFT)));
radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
radeon_ring_write(ring,
((6 << R300_MS_X3_SHIFT) |
(6 << R300_MS_Y3_SHIFT) |
(6 << R300_MS_X4_SHIFT) |
289,19 → 284,19
(6 << R300_MS_X5_SHIFT) |
(6 << R300_MS_Y5_SHIFT) |
(6 << R300_MSBD1_SHIFT)));
radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
radeon_ring_write(ring,
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, ring);
}
 
void r300_errata(struct radeon_device *rdev)
static void r300_errata(struct radeon_device *rdev)
{
rdev->pll_errata = 0;
 
327,7 → 322,7
return -1;
}
 
void r300_gpu_init(struct radeon_device *rdev)
static void r300_gpu_init(struct radeon_device *rdev)
{
uint32_t gb_tile_config, tmp;
 
382,28 → 377,6
rdev->num_gb_pipes, rdev->num_z_pipes);
}
 
bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
u32 rbbm_status;
int r;
 
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
 
int r300_asic_reset(struct radeon_device *rdev)
{
struct r100_mc_save save;
454,7 → 427,6
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeed\n");
635,7 → 607,7
int r;
u32 idx_value;
 
ib = p->ib->ptr;
ib = p->ib.ptr;
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
 
711,6 → 683,10
return r;
}
 
if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
721,6 → 697,7
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
ib[idx] = tmp;
}
track->textures[i].robj = reloc->robj;
track->tex_dirty = true;
break;
770,6 → 747,7
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
788,6 → 766,7
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
}
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
853,6 → 832,7
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
871,7 → 851,7
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
 
}
track->zb.pitch = idx_value & 0x3FFC;
track->zb_dirty = true;
break;
1169,7 → 1149,7
unsigned idx;
int r;
 
ib = p->ib->ptr;
ib = p->ib.ptr;
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch(pkt->opcode) {
1409,11 → 1389,13
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
return 0;
}
 
1492,6 → 1474,7
return r;
}
r300_set_reg_safe(rdev);
 
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
/drivers/video/drm/radeon/r420.c
27,7 → 27,7
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
160,6 → 160,8
 
static void r420_cp_errata_init(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
*
167,22 → 169,24
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
radeon_ring_lock(rdev, 8);
radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
radeon_ring_write(rdev, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev);
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
radeon_ring_write(ring, rdev->config.r300.resync_scratch);
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
}
 
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
radeon_ring_lock(rdev, 8);
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev);
radeon_ring_lock(rdev, ring, 8);
radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, R300_RB3D_DC_FINISH);
radeon_ring_unlock_commit(rdev, ring);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
 
225,44 → 229,21
return r;
}
r420_cp_errata_init(rdev);
r = r100_ib_init(rdev);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
return 0;
}
 
int r420_resume(struct radeon_device *rdev)
{
/* Make sure the GART is disabled */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_disable(rdev);
/* Resume clock before doing reset */
r420_clock_resume(rdev);
/* Reset gpu before posting otherwise ATOM will enter infinite loop */
if (radeon_asic_reset(rdev)) {
dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
RREG32(R_000E40_RBBM_STATUS),
RREG32(R_0007C0_CP_STAT));
}
/* check if cards are posted or not */
if (rdev->is_atom_bios) {
atom_asic_init(rdev->mode_info.atom_context);
} else {
radeon_combios_asic_init(rdev->ddev);
}
/* Resume clock after posting */
r420_clock_resume(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
return r420_startup(rdev);
}
 
 
 
 
 
int r420_init(struct radeon_device *rdev)
{
int r;
341,6 → 322,7
return r;
}
r420_set_reg_safe(rdev);
 
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
/drivers/video/drm/radeon/r500_reg.h
351,6 → 351,8
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c
#define AVIVO_D1CRTC_STATUS 0x609c
# define AVIVO_D1CRTC_V_BLANK (1 << 0)
#define AVIVO_D1CRTC_STATUS_POSITION 0x60a0
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4
573,6 → 575,7
 
#define AVIVO_TMDSA_CNTL 0x7880
# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
# define AVIVO_TMDSA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
633,6 → 636,7
 
#define AVIVO_LVTMA_CNTL 0x7a80
# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
# define AVIVO_LVTMA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
/drivers/video/drm/radeon/r520.c
25,7 → 25,7
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
33,7 → 33,7
 
/* This file gathers functions specific to: r520, rv530, rv560, rv570, r580 */
 
static int r520_mc_wait_for_idle(struct radeon_device *rdev)
int r520_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
119,7 → 119,7
rdev->mc.vram_width *= 2;
}
 
void r520_mc_init(struct radeon_device *rdev)
static void r520_mc_init(struct radeon_device *rdev)
{
 
r520_vram_get_type(rdev);
131,7 → 131,7
radeon_update_bandwidth_info(rdev);
}
 
void r520_mc_program(struct radeon_device *rdev)
static void r520_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
 
196,11 → 196,13
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
return 0;
}
 
272,6 → 274,7
if (r)
return r;
rv515_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
/drivers/video/drm/radeon/r600.c
28,8 → 28,9
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include "drmP.h"
#include "radeon_drm.h"
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_mode.h"
47,6 → 48,7
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
#define CAYMAN_RLC_UCODE_SIZE 1024
#define ARUBA_RLC_UCODE_SIZE 1536
 
/* Firmware Names */
MODULE_FIRMWARE("radeon/R600_pfp.bin");
95,7 → 97,7
 
/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
static void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);
static void r600_pcie_gen2_enable(struct radeon_device *rdev);
278,67 → 280,66
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned enable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
/* don't try to enable hpd on eDP or LVDS to avoid breaking the
* aux dp channel on imac; this helps (but does not completely fix)
* https://bugzilla.redhat.com/show_bug.cgi?id=726143
*/
continue;
}
if (ASIC_IS_DCE3(rdev)) {
u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
if (ASIC_IS_DCE32(rdev))
tmp |= DC_HPDx_EN;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, tmp);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, tmp);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, tmp);
rdev->irq.hpd[2] = true;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, tmp);
rdev->irq.hpd[3] = true;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, tmp);
rdev->irq.hpd[4] = true;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, tmp);
rdev->irq.hpd[5] = true;
break;
default:
break;
}
}
} else {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[1] = true;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
rdev->irq.hpd[2] = true;
break;
default:
break;
}
}
enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
if (rdev->irq.installed)
r600_irq_set(rdev);
// radeon_irq_kms_enable_hpd(rdev, enable);
}
 
void r600_hpd_fini(struct radeon_device *rdev)
345,61 → 346,52
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned disable = 0;
 
if (ASIC_IS_DCE3(rdev)) {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
if (ASIC_IS_DCE3(rdev)) {
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HPD1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HPD2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HPD3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
case RADEON_HPD_4:
WREG32(DC_HPD4_CONTROL, 0);
rdev->irq.hpd[3] = false;
break;
/* DCE 3.2 */
case RADEON_HPD_5:
WREG32(DC_HPD5_CONTROL, 0);
rdev->irq.hpd[4] = false;
break;
case RADEON_HPD_6:
WREG32(DC_HPD6_CONTROL, 0);
rdev->irq.hpd[5] = false;
break;
default:
break;
}
}
} else {
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
switch (radeon_connector->hpd.hpd) {
case RADEON_HPD_1:
WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
rdev->irq.hpd[1] = false;
break;
case RADEON_HPD_3:
WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
rdev->irq.hpd[2] = false;
break;
default:
break;
}
}
disable |= 1 << radeon_connector->hpd.hpd;
}
// radeon_irq_kms_disable_hpd(rdev, disable);
}
 
/*
413,7 → 405,7
/* flush hdp cache so updates hit vram */
if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
!(rdev->flags & RADEON_IS_AGP)) {
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
void __iomem *ptr = (void *)rdev->gart.ptr;
u32 tmp;
 
/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
448,7 → 440,7
{
int r;
 
if (rdev->gart.table.vram.robj) {
if (rdev->gart.robj) {
WARN(1, "R600 PCIE GART already initialized\n");
return 0;
}
460,12 → 452,12
return radeon_gart_table_vram_alloc(rdev);
}
 
int r600_pcie_gart_enable(struct radeon_device *rdev)
static int r600_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
 
if (rdev->gart.table.vram.robj == NULL) {
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
510,14 → 502,17
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 
r600_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
void r600_pcie_gart_disable(struct radeon_device *rdev)
static void r600_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int i, r;
int i;
 
/* Disable all tables */
for (i = 0; i < 7; i++)
544,17 → 539,10
WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
radeon_gart_table_vram_unpin(rdev);
}
}
}
 
void r600_pcie_gart_fini(struct radeon_device *rdev)
static void r600_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
r600_pcie_gart_disable(rdev);
561,7 → 549,7
radeon_gart_table_vram_free(rdev);
}
 
void r600_agp_enable(struct radeon_device *rdev)
static void r600_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
int i;
651,7 → 639,7
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
709,7 → 697,7
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
size_af = 0xFFFFFFFF - mc->gtt_end + 1;
size_af = 0xFFFFFFFF - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
723,7 → 711,7
mc->real_vram_size = size_af;
mc->mc_vram_size = size_af;
}
mc->vram_start = mc->gtt_end;
mc->vram_start = mc->gtt_end + 1;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
741,7 → 729,7
}
}
 
int r600_mc_init(struct radeon_device *rdev)
static int r600_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
790,11 → 778,41
return 0;
}
 
int r600_vram_scratch_init(struct radeon_device *rdev)
{
int r;
 
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->vram_scratch.robj);
if (r) {
return r;
}
}
 
r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->vram_scratch.robj,
RADEON_GEM_DOMAIN_VRAM, &rdev->vram_scratch.gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->vram_scratch.robj);
return r;
}
r = radeon_bo_kmap(rdev->vram_scratch.robj,
(void **)&rdev->vram_scratch.ptr);
if (r)
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
 
return r;
}
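 
/* Matching teardown sketch -- the fini path is not part of this hunk, so this
 * is an assumption: reserve, kunmap, unpin, unreserve, then drop the BO
 * reference, mirroring the init order above. */
static void r600_vram_scratch_fini_sketch(struct radeon_device *rdev)
{
        int r;

        if (rdev->vram_scratch.robj == NULL)
                return;
        r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
        if (likely(r == 0)) {
                radeon_bo_kunmap(rdev->vram_scratch.robj);
                radeon_bo_unpin(rdev->vram_scratch.robj);
                radeon_bo_unreserve(rdev->vram_scratch.robj);
        }
        radeon_bo_unref(&rdev->vram_scratch.robj);
}
 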
/* We don't check that the GPU really needs a reset; we simply do the
* reset. It's up to the caller to determine if the GPU needs one. We
* might add a helper function to check that.
*/
int r600_gpu_soft_reset(struct radeon_device *rdev)
static int r600_gpu_soft_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
825,6 → 843,14
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
rv515_mc_stop(rdev, &save);
if (r600_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
868,41 → 894,35
RREG32(R_008014_GRBM_STATUS2));
dev_info(rdev->dev, " R_000E50_SRBM_STATUS=0x%08X\n",
RREG32(R_000E50_SRBM_STATUS));
dev_info(rdev->dev, " R_008674_CP_STALLED_STAT1 = 0x%08X\n",
RREG32(CP_STALLED_STAT1));
dev_info(rdev->dev, " R_008678_CP_STALLED_STAT2 = 0x%08X\n",
RREG32(CP_STALLED_STAT2));
dev_info(rdev->dev, " R_00867C_CP_BUSY_STAT = 0x%08X\n",
RREG32(CP_BUSY_STAT));
dev_info(rdev->dev, " R_008680_CP_STAT = 0x%08X\n",
RREG32(CP_STAT));
rv515_mc_resume(rdev, &save);
return 0;
}
 
bool r600_gpu_is_lockup(struct radeon_device *rdev)
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
struct r100_gpu_lockup *lockup;
int r;
 
if (rdev->family >= CHIP_RV770)
lockup = &rdev->config.rv770.lockup;
else
lockup = &rdev->config.r600.lockup;
 
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
r100_gpu_lockup_update(lockup, &rdev->cp);
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
r = radeon_ring_lock(rdev, 2);
if (!r) {
/* PACKET2 NOP */
radeon_ring_write(rdev, 0x80000000);
radeon_ring_write(rdev, 0x80000000);
radeon_ring_unlock_commit(rdev);
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
 
int r600_asic_reset(struct radeon_device *rdev)
{
909,113 → 929,51
return r600_gpu_soft_reset(rdev);
}
 
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
u32 total_max_rb_num,
u32 disabled_rb_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
u32 enabled_backends_count;
u32 cur_pipe;
u32 swizzle_pipe[R6XX_MAX_PIPES];
u32 cur_backend;
u32 i;
u32 rendering_pipe_num, rb_num_width, req_rb_num;
u32 pipe_rb_ratio, pipe_rb_remain;
u32 data = 0, mask = 1 << (max_rb_num - 1);
unsigned i, j;
 
if (num_tile_pipes > R6XX_MAX_PIPES)
num_tile_pipes = R6XX_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > R6XX_MAX_BACKENDS)
num_backends = R6XX_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
/* mask out the RBs that don't exist on that asic */
disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
 
enabled_backends_mask = 0;
enabled_backends_count = 0;
for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
rendering_pipe_num = 1 << tiling_pipe_num;
req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
BUG_ON(rendering_pipe_num < req_rb_num);
 
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
pipe_rb_ratio = rendering_pipe_num / req_rb_num;
pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
 
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
swizzle_pipe[0] = 0;
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 3:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
break;
case 4:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
break;
case 5:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
break;
case 6:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
break;
case 7:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
break;
case 8:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 1;
swizzle_pipe[5] = 3;
swizzle_pipe[6] = 5;
swizzle_pipe[7] = 7;
break;
if (rdev->family <= CHIP_RV740) {
/* r6xx/r7xx */
rb_num_width = 2;
} else {
/* eg+ */
rb_num_width = 4;
}
 
cur_backend = 0;
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
 
backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
 
cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
for (i = 0; i < max_rb_num; i++) {
if (!(mask & disabled_rb_mask)) {
for (j = 0; j < pipe_rb_ratio; j++) {
data <<= rb_num_width;
data |= max_rb_num - i - 1;
}
if (pipe_rb_remain) {
data <<= rb_num_width;
data |= max_rb_num - i - 1;
pipe_rb_remain--;
}
}
mask >>= 1;
}
 
return backend_map;
return data;
}
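 
/* Worked example (my numbers, illustrative): 4 pipes (tiling_pipe_num = 2),
 * 4 of 8 RBs, none disabled, on r6xx (rb_num_width = 2). pipe_rb_ratio is
 * 4 / 4 = 1, so each enabled RB is emitted once, highest first, giving
 * 0b11100100 = 0xE4 -- the identity map RB3..RB0 -> pipes 3..0. */
static u32 r6xx_remap_example(struct radeon_device *rdev)
{
        return r6xx_remap_render_backend(rdev, 2, 4, 8, 0); /* == 0xE4 */
}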
 
int r600_count_pipe_bits(uint32_t val)
1029,11 → 987,10
return ret;
}
 
void r600_gpu_init(struct radeon_device *rdev)
static void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
u32 backend_map;
u32 cc_rb_backend_disable;
u32 cc_gc_shader_pipe_config;
u32 tmp;
1044,8 → 1001,9
u32 sq_thread_resource_mgmt = 0;
u32 sq_stack_resource_mgmt_1 = 0;
u32 sq_stack_resource_mgmt_2 = 0;
u32 disabled_rb_mask;
 
/* FIXME: implement */
rdev->config.r600.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_R600:
rdev->config.r600.max_pipes = 4;
1149,10 → 1107,7
rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.r600.tiling_group_size = 512;
else
rdev->config.r600.tiling_group_size = 256;
 
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
1164,32 → 1119,36
tiling_config |= BANK_SWAPS(1);
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
tmp = R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
if (tmp < rdev->config.r600.max_backends) {
rdev->config.r600.max_backends = tmp;
}
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
tmp = R6XX_MAX_PIPES -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
if (tmp < rdev->config.r600.max_pipes) {
rdev->config.r600.max_pipes = tmp;
}
tmp = R6XX_MAX_SIMDS -
r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
if (tmp < rdev->config.r600.max_simds) {
rdev->config.r600.max_simds = tmp;
}
 
backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
(R6XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R6XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
R6XX_MAX_BACKENDS, disabled_rb_mask);
tiling_config |= tmp << 16;
rdev->config.r600.backend_map = tmp;
 
rdev->config.r600.tile_config = tiling_config;
rdev->config.r600.backend_map = backend_map;
tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
 
/* Setup pipes */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
 
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
1433,6 → 1392,7
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
WREG32(VC_ENHANCE, 0);
}
 
 
1672,27 → 1632,28
 
int r600_cp_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
uint32_t cp_me;
 
r = radeon_ring_lock(rdev, 7);
r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(rdev, 0x1);
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
if (rdev->family >= CHIP_RV770) {
radeon_ring_write(rdev, 0x0);
radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
} else {
radeon_ring_write(rdev, 0x3);
radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
radeon_ring_write(ring, 0x3);
radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
}
radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
 
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
1701,6 → 1662,7
 
int r600_cp_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
1712,13 → 1674,13
WREG32(GRBM_SOFT_RESET, 0);
 
/* Set ring buffer size */
rb_bufsz = drm_order(rdev->cp.ring_size / 8);
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
WREG32(CP_SEM_WAIT_TIMER, 0x4);
WREG32(CP_SEM_WAIT_TIMER, 0x0);
 
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
1726,7 → 1688,8
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
WREG32(CP_RB_WPTR, 0);
ring->wptr = 0;
WREG32(CP_RB_WPTR, ring->wptr);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
1744,43 → 1707,47
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
 
WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
 
rdev->cp.rptr = RREG32(CP_RB_RPTR);
rdev->cp.wptr = RREG32(CP_RB_WPTR);
ring->rptr = RREG32(CP_RB_RPTR);
 
r600_cp_start(rdev);
rdev->cp.ready = true;
r = radeon_ring_test(rdev);
ring->ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, ring);
if (r) {
rdev->cp.ready = false;
ring->ready = false;
return r;
}
return 0;
}
 
void r600_cp_commit(struct radeon_device *rdev)
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
WREG32(CP_RB_WPTR, rdev->cp.wptr);
(void)RREG32(CP_RB_WPTR);
}
 
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
u32 rb_bufsz;
int r;
 
/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
rdev->cp.ring_size = ring_size;
rdev->cp.align_mask = 16 - 1;
ring->ring_size = ring_size;
ring->align_mask = 16 - 1;
 
if (radeon_ring_supports_scratch_reg(rdev, ring)) {
r = radeon_scratch_get(rdev, &ring->rptr_save_reg);
if (r) {
DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r);
ring->rptr_save_reg = 0;
}
}
}
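 
/* Worked example (illustrative): the 1 MiB GFX ring requested in r600_init
 * survives the alignment above unchanged, being a power of two already:
 *   rb_bufsz  = drm_order((1 << 20) / 8) = 17
 *   ring_size = (1 << 18) * 4            = 1 MiB
 * A non-power-of-two request is rounded up, e.g. 0x180000 -> 0x200000. */
static unsigned r600_aligned_ring_size(unsigned requested)
{
        unsigned rb_bufsz = drm_order(requested / 8);

        return (1 << (rb_bufsz + 1)) * 4;
}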
 
void r600_cp_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r600_cp_stop(rdev);
radeon_ring_fini(rdev);
radeon_ring_fini(rdev, ring);
radeon_scratch_free(rdev, ring->rptr_save_reg);
}
 
 
1799,7 → 1766,7
}
}
 
int r600_ring_test(struct radeon_device *rdev)
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
1812,16 → 1779,16
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ring_lock(rdev, 3);
r = radeon_ring_lock(rdev, ring, 3);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ring->idx, r);
radeon_scratch_free(rdev, scratch);
return r;
}
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, 0xDEADBEEF);
radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
1829,10 → 1796,10
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ring test succeeded in %d usecs\n", i);
DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
} else {
DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
ring->idx, scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
1842,51 → 1809,82
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
 
if (rdev->wb.use_event) {
u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
(u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(rdev, addr & 0xffffffff);
radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
} else {
radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(ring, RB_INT_STAT);
}
}
 
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *ring,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
uint64_t addr = semaphore->gpu_addr;
unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
 
if (rdev->family < CHIP_CAYMAN)
sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
 
radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
}
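 
/* Illustrative pairing (my sketch): ring A signals, ring B waits, so work
 * queued on B after the wait only starts once A reaches the signal. */
static void r600_sync_rings_example(struct radeon_device *rdev,
                                    struct radeon_ring *ring_a,
                                    struct radeon_ring *ring_b,
                                    struct radeon_semaphore *sem)
{
        r600_semaphore_ring_emit(rdev, ring_a, sem, false); /* signal */
        r600_semaphore_ring_emit(rdev, ring_b, sem, true);  /* wait */
}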
 
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence)
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence)
{
struct radeon_semaphore *sem = NULL;
struct radeon_sa_bo *vb = NULL;
int r;
 
mutex_lock(&rdev->r600_blit.mutex);
rdev->r600_blit.vb_ib = NULL;
r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
r = r600_blit_prepare_copy(rdev, num_gpu_pages, fence, &vb, &sem);
if (r) {
// if (rdev->r600_blit.vb_ib)
// radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
mutex_unlock(&rdev->r600_blit.mutex);
return r;
}
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
r600_blit_done_copy(rdev, fence);
mutex_unlock(&rdev->r600_blit.mutex);
r600_kms_blit_copy(rdev, src_offset, dst_offset, num_gpu_pages, vb);
r600_blit_done_copy(rdev, fence, vb, sem);
return 0;
}
 
1903,8 → 1901,9
/* FIXME: implement */
}
 
int r600_startup(struct radeon_device *rdev)
static int r600_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* enable pcie gen2 link */
1930,17 → 1929,10
r = r600_blit_init(rdev);
if (r) {
// r600_blit_fini(rdev);
rdev->asic->copy = NULL;
rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
r = r600_video_init(rdev);
if (r) {
// r600_video_fini(rdev);
// rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed video blitter (%d) falling back to memcpy\n", r);
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
1955,7 → 1947,10
}
r600_irq_set(rdev);
 
r = radeon_ring_init(rdev, rdev->cp.ring_size);
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
 
if (r)
return r;
r = r600_cp_load_microcode(rdev);
1999,10 → 1994,6
if (r600_debugfs_mc_info_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for mc !\n");
}
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
2052,8 → 2043,8
if (r)
return r;
 
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
2066,25 → 2057,9
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
// r600_suspend(rdev);
// r600_wb_fini(rdev);
// radeon_ring_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
} else {
r = r600_ib_test(rdev);
if (r) {
dev_err(rdev->dev, "IB test failed (%d).\n", r);
rdev->accel_working = false;
}
}
}
 
return 0;
}
2094,20 → 2069,37
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
/* FIXME: implement */
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev,
struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 next_rptr;
 
if (ring->rptr_save_reg) {
next_rptr = ring->wptr + 3 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(ring, next_rptr);
} else if (rdev->wb.enabled) {
next_rptr = ring->wptr + 5 + 4;
radeon_ring_write(ring, PACKET3(PACKET3_MEM_WRITE, 3));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, (upper_32_bits(ring->next_rptr_gpu_addr) & 0xff) | (1 << 18));
radeon_ring_write(ring, next_rptr);
radeon_ring_write(ring, 0);
}
 
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(rdev, ib->length_dw);
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
radeon_ring_write(ring, ib->length_dw);
}
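/* Editor's note on the next_rptr arithmetic above: a SET_CONFIG_REG write
 * with count 1 is 3 dwords and a MEM_WRITE with count 3 is 5 dwords, while
 * the INDIRECT_BUFFER packet that follows is 4 dwords. So "wptr + 3 + 4"
 * and "wptr + 5 + 4" both point just past the IB packet, i.e. at the first
 * dword the CP fetches after consuming this IB. */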
 
int r600_ib_test(struct radeon_device *rdev)
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
struct radeon_ib *ib;
struct radeon_ib ib;
uint32_t scratch;
uint32_t tmp = 0;
unsigned i;
2119,39 → 2111,24
return r;
}
WREG32(scratch, 0xCAFEDEAD);
r = radeon_ib_get(rdev, &ib);
r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
goto free_scratch;
}
ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib->ptr[2] = 0xDEADBEEF;
ib->ptr[3] = PACKET2(0);
ib->ptr[4] = PACKET2(0);
ib->ptr[5] = PACKET2(0);
ib->ptr[6] = PACKET2(0);
ib->ptr[7] = PACKET2(0);
ib->ptr[8] = PACKET2(0);
ib->ptr[9] = PACKET2(0);
ib->ptr[10] = PACKET2(0);
ib->ptr[11] = PACKET2(0);
ib->ptr[12] = PACKET2(0);
ib->ptr[13] = PACKET2(0);
ib->ptr[14] = PACKET2(0);
ib->ptr[15] = PACKET2(0);
ib->length_dw = 16;
r = radeon_ib_schedule(rdev, ib);
ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
ib.ptr[2] = 0xDEADBEEF;
ib.length_dw = 3;
r = radeon_ib_schedule(rdev, &ib, NULL);
if (r) {
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
return r;
goto free_ib;
}
r = radeon_fence_wait(ib->fence, false);
r = radeon_fence_wait(ib.fence, false);
if (r) {
DRM_ERROR("radeon: fence wait failed (%d).\n", r);
return r;
goto free_ib;
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
2160,14 → 2137,16
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
DRM_INFO("ib test succeeded in %u usecs\n", i);
DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib.fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
r = -EINVAL;
}
free_ib:
radeon_ib_free(rdev, &ib);
free_scratch:
radeon_scratch_free(rdev, scratch);
radeon_ib_free(rdev, &ib);
return r;
}
 
2194,7 → 2173,7
rdev->ih.rptr = 0;
}
 
static int r600_ih_ring_alloc(struct radeon_device *rdev)
int r600_ih_ring_alloc(struct radeon_device *rdev)
{
int r;
 
2203,7 → 2182,7
r = radeon_bo_create(rdev, rdev->ih.ring_size,
PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->ih.ring_obj);
NULL, &rdev->ih.ring_obj);
if (r) {
DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
return r;
2230,7 → 2209,7
return 0;
}
 
static void r600_ih_ring_fini(struct radeon_device *rdev)
void r600_ih_ring_fini(struct radeon_device *rdev)
{
int r;
if (rdev->ih.ring_obj) {
2254,7 → 2233,7
/* r7xx asics need to soft reset RLC before halting */
WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
RREG32(SRBM_SOFT_RESET);
udelay(15000);
mdelay(15);
WREG32(SRBM_SOFT_RESET, 0);
RREG32(SRBM_SOFT_RESET);
}
2277,10 → 2256,17
 
r600_rlc_stop(rdev);
 
WREG32(RLC_HB_CNTL, 0);
 
if (rdev->family == CHIP_ARUBA) {
WREG32(TN_RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
WREG32(TN_RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
}
if (rdev->family <= CHIP_CAYMAN) {
WREG32(RLC_HB_BASE, 0);
WREG32(RLC_HB_CNTL, 0);
WREG32(RLC_HB_RPTR, 0);
WREG32(RLC_HB_WPTR, 0);
}
if (rdev->family <= CHIP_CAICOS) {
WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
2289,7 → 2275,12
WREG32(RLC_UCODE_CNTL, 0);
 
fw_data = (const __be32 *)rdev->rlc_fw->data;
if (rdev->family >= CHIP_CAYMAN) {
if (rdev->family >= CHIP_ARUBA) {
for (i = 0; i < ARUBA_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
} else if (rdev->family >= CHIP_CAYMAN) {
for (i = 0; i < CAYMAN_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
2342,7 → 2333,6
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false;
rdev->ih.wptr = 0;
rdev->ih.rptr = 0;
}
 
2371,6 → 2361,15
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
} else {
tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
2381,6 → 2380,10
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
}
 
2450,6 → 2453,9
else
r600_disable_interrupt_state(rdev);
 
/* at this point everything should be setup correctly to enable master */
pci_set_master(rdev->pdev);
 
/* enable irqs */
r600_enable_interrupts(rdev);
 
2461,7 → 2467,7
u32 mode_int = 0;
u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
u32 grbm_int_cntl = 0;
u32 hdmi1, hdmi2;
u32 hdmi0, hdmi1;
u32 d1grph = 0, d2grph = 0;
 
if (!rdev->irq.installed) {
2476,9 → 2482,7
return 0;
}
 
hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
if (ASIC_IS_DCE3(rdev)) {
hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
2486,26 → 2490,32
if (ASIC_IS_DCE32(rdev)) {
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
hdmi0 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1) & ~AFMT_AZ_FORMAT_WTRIG_MASK;
} else {
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
} else {
hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
hdmi0 = RREG32(HDMI0_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
hdmi1 = RREG32(HDMI1_AUDIO_PACKET_CONTROL) & ~HDMI0_AZ_FORMAT_WTRIG_MASK;
}
 
if (rdev->irq.sw_int) {
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("r600_irq_set: vblank 0\n");
mode_int |= D1MODE_VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
rdev->irq.pflip[1]) {
atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("r600_irq_set: vblank 1\n");
mode_int |= D2MODE_VBLANK_INT_MASK;
}
2533,18 → 2543,14
DRM_DEBUG("r600_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hdmi[0]) {
DRM_DEBUG("r600_irq_set: hdmi 1\n");
hdmi1 |= R600_HDMI_INT_EN;
if (rdev->irq.afmt[0]) {
DRM_DEBUG("r600_irq_set: hdmi 0\n");
hdmi0 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.hdmi[1]) {
DRM_DEBUG("r600_irq_set: hdmi 2\n");
hdmi2 |= R600_HDMI_INT_EN;
if (rdev->irq.afmt[1]) {
DRM_DEBUG("r600_irq_set: hdmi 0\n");
hdmi1 |= HDMI0_AZ_FORMAT_WTRIG_MASK;
}
if (rdev->irq.gui_idle) {
DRM_DEBUG("gui idle\n");
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
 
WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(DxMODE_INT_MASK, mode_int);
2551,9 → 2557,7
WREG32(D1GRPH_INTERRUPT_CONTROL, d1grph);
WREG32(D2GRPH_INTERRUPT_CONTROL, d2grph);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
if (ASIC_IS_DCE3(rdev)) {
WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
2561,18 → 2565,24
if (ASIC_IS_DCE32(rdev)) {
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, hdmi0);
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, hdmi1);
} else {
WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
} else {
WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
WREG32(HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(HDMI1_AUDIO_PACKET_CONTROL, hdmi1);
}
 
return 0;
}
 
static inline void r600_irq_ack(struct radeon_device *rdev)
static void r600_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
 
2580,10 → 2590,19
rdev->irq.stat_regs.r600.disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
if (ASIC_IS_DCE32(rdev)) {
rdev->irq.stat_regs.r600.hdmi0_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET0);
rdev->irq.stat_regs.r600.hdmi1_status = RREG32(AFMT_STATUS + DCE3_HDMI_OFFSET1);
} else {
rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
rdev->irq.stat_regs.r600.hdmi1_status = RREG32(DCE3_HDMI1_STATUS);
}
} else {
rdev->irq.stat_regs.r600.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.r600.disp_int_cont2 = 0;
rdev->irq.stat_regs.r600.hdmi0_status = RREG32(HDMI0_STATUS);
rdev->irq.stat_regs.r600.hdmi1_status = RREG32(HDMI1_STATUS);
}
rdev->irq.stat_regs.r600.d1grph_int = RREG32(D1GRPH_INTERRUPT_STATUS);
rdev->irq.stat_regs.r600.d2grph_int = RREG32(D2GRPH_INTERRUPT_STATUS);
2649,22 → 2668,37
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.r600.hdmi0_status & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET0, tmp);
}
if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
if (rdev->irq.stat_regs.r600.hdmi1_status & AFMT_AZ_FORMAT_WTRIG) {
tmp = RREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1);
tmp |= AFMT_AZ_FORMAT_WTRIG_ACK;
WREG32(AFMT_AUDIO_PACKET_CONTROL + DCE3_HDMI_OFFSET1, tmp);
}
} else {
if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
tmp = RREG32(HDMI0_AUDIO_PACKET_CONTROL);
tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
WREG32(HDMI0_AUDIO_PACKET_CONTROL, tmp);
}
if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
if (ASIC_IS_DCE3(rdev)) {
if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
}
tmp = RREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL);
tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
WREG32(DCE3_HDMI1_AUDIO_PACKET_CONTROL, tmp);
} else {
if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
tmp = RREG32(HDMI1_AUDIO_PACKET_CONTROL);
tmp |= HDMI0_AZ_FORMAT_WTRIG_ACK;
WREG32(HDMI1_AUDIO_PACKET_CONTROL, tmp);
}
}
}
}
 
static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
static u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
 
2726,8 → 2760,8
u32 rptr;
u32 src_id, src_data;
u32 ring_index;
unsigned long flags;
bool queue_hotplug = false;
bool queue_hdmi = false;
 
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
2737,17 → 2771,15
RREG32(IH_RB_WPTR);
 
wptr = r600_get_ih_wptr(rdev);
rptr = rdev->ih.rptr;
// DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
spin_lock_irqsave(&rdev->ih.lock, flags);
 
if (rptr == wptr) {
spin_unlock_irqrestore(&rdev->ih.lock, flags);
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
return IRQ_NONE;
}
 
restart_ih:
rptr = rdev->ih.rptr;
DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
/* Order reading of wptr vs. reading of IH ring data */
rmb();
 
2754,7 → 2786,6
/* display interrupts */
r600_irq_ack(rdev);
 
rdev->ih.wptr = wptr;
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
2863,24 → 2894,39
break;
}
break;
case 21: /* HDMI */
DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
// r600_audio_schedule_polling(rdev);
case 21: /* hdmi */
switch (src_data) {
case 4:
if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI0\n");
}
break;
case 5:
if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
queue_hdmi = true;
DRM_DEBUG("IH: HDMI1\n");
}
break;
default:
DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
rdev->pm.gui_idle = true;
// wake_up(&rdev->irq.idle_queue);
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
2891,15 → 2937,15
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
 
/* make sure wptr hasn't changed while processing */
wptr = r600_get_ih_wptr(rdev);
if (wptr != rdev->ih.wptr)
if (wptr != rptr)
goto restart_ih;
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
spin_unlock_irqrestore(&rdev->ih.lock, flags);
 
return IRQ_HANDLED;
}
 
2908,30 → 2954,6
*/
#if defined(CONFIG_DEBUG_FS)
 
static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
unsigned count, i, j;
 
radeon_ring_free_size(rdev);
count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
i = rdev->cp.rptr;
for (j = 0; j <= count; j++) {
seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
i = (i + 1) & rdev->cp.ptr_mask;
}
return 0;
}
 
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
2945,7 → 2967,6
 
static struct drm_info_list r600_mc_info_list[] = {
{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif
 
3107,6 → 3128,8
{
u32 link_width_cntl, lanes, speed_cntl, training_cntl, tmp;
u16 link_cntl2;
u32 mask;
int ret;
 
if (radeon_pcie_gen2 == 0)
return;
3125,6 → 3148,21
if (rdev->family <= CHIP_R600)
return;
 
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret != 0)
return;
 
if (!(mask & DRM_PCIE_SPEED_50))
return;
 
speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
if (speed_cntl & LC_CURRENT_DATA_RATE) {
DRM_INFO("PCIE gen 2 link speeds already enabled\n");
return;
}
 
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
/* 55 nm r6xx asics */
if ((rdev->family == CHIP_RV670) ||
(rdev->family == CHIP_RV620) ||
3204,3 → 3242,23
WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
}
}
 
/**
* r600_get_gpu_clock - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (R6xx-cayman).
* Returns the 64 bit clock counter snapshot.
*/
uint64_t r600_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
 
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
/drivers/video/drm/radeon/r600_audio.c
29,7 → 29,28
#include "radeon_asic.h"
#include "atom.h"
 
#define AUDIO_TIMER_INTERVALL 100 /* 1/10 second should be enough */
/*
* check if enc_priv stores radeon_encoder_atom_dig
*/
static bool radeon_dig_encoder(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
return true;
}
return false;
}
 
/*
* check if the chipset is supported
36,115 → 57,91
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
}
 
/*
* current number of channels
*/
int r600_audio_channels(struct radeon_device *rdev)
struct r600_audio r600_audio_status(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0x7) + 1;
}
struct r600_audio status;
uint32_t value;
 
/*
* current bits per sample
*/
int r600_audio_bits_per_sample(struct radeon_device *rdev)
{
uint32_t value = (RREG32(R600_AUDIO_RATE_BPS_CHANNEL) & 0xF0) >> 4;
switch (value) {
case 0x0: return 8;
case 0x1: return 16;
case 0x2: return 20;
case 0x3: return 24;
case 0x4: return 32;
}
value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
 
dev_err(rdev->dev, "Unknown bits per sample 0x%x using 16 instead\n",
/* number of channels */
status.channels = (value & 0x7) + 1;
 
/* bits per sample */
switch ((value & 0xF0) >> 4) {
case 0x0:
status.bits_per_sample = 8;
break;
case 0x1:
status.bits_per_sample = 16;
break;
case 0x2:
status.bits_per_sample = 20;
break;
case 0x3:
status.bits_per_sample = 24;
break;
case 0x4:
status.bits_per_sample = 32;
break;
default:
dev_err(rdev->dev, "Unknown bits per sample 0x%x, using 16\n",
(int)value);
 
return 16;
status.bits_per_sample = 16;
}
 
/*
* current sampling rate in HZ
*/
int r600_audio_rate(struct radeon_device *rdev)
{
uint32_t value = RREG32(R600_AUDIO_RATE_BPS_CHANNEL);
uint32_t result;
 
/* current sampling rate in HZ */
if (value & 0x4000)
result = 44100;
status.rate = 44100;
else
result = 48000;
status.rate = 48000;
status.rate *= ((value >> 11) & 0x7) + 1;
status.rate /= ((value >> 8) & 0x7) + 1;
 
result *= ((value >> 11) & 0x7) + 1;
result /= ((value >> 8) & 0x7) + 1;
value = RREG32(R600_AUDIO_STATUS_BITS);
 
return result;
}
/* iec 60958 status bits */
status.status_bits = value & 0xff;
 
/*
* iec 60958 status bits
*/
uint8_t r600_audio_status_bits(struct radeon_device *rdev)
{
return RREG32(R600_AUDIO_STATUS_BITS) & 0xff;
}
/* iec 60958 category code */
status.category_code = (value >> 8) & 0xff;
 
/*
* iec 60958 category code
*/
uint8_t r600_audio_category_code(struct radeon_device *rdev)
{
return (RREG32(R600_AUDIO_STATUS_BITS) >> 8) & 0xff;
return status;
}
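/* Editor's note: a worked decode of the bit layout read above, for an
 * assumed raw R600_AUDIO_RATE_BPS_CHANNEL value of 0x4011:
 * channels = (0x4011 & 0x7) + 1 = 2
 * bits_per_sample: (0x4011 & 0xF0) >> 4 = 0x1 -> 16
 * rate = 44100 (bit 14 set), multiplier ((0x4011 >> 11) & 0x7) + 1 = 1,
 * divisor ((0x4011 >> 8) & 0x7) + 1 = 1, giving 44100 Hz. */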
 
/*
* update all hdmi interfaces with current audio parameters
*/
static void r600_audio_update_hdmi(unsigned long param)
void r600_audio_update_hdmi(struct work_struct *work)
{
struct radeon_device *rdev = (struct radeon_device *)param;
struct radeon_device *rdev = container_of(work, struct radeon_device,
audio_work);
struct drm_device *dev = rdev->ddev;
 
int channels = r600_audio_channels(rdev);
int rate = r600_audio_rate(rdev);
int bps = r600_audio_bits_per_sample(rdev);
uint8_t status_bits = r600_audio_status_bits(rdev);
uint8_t category_code = r600_audio_category_code(rdev);
 
struct r600_audio audio_status = r600_audio_status(rdev);
struct drm_encoder *encoder;
int changes = 0, still_going = 0;
bool changed = false;
 
changes |= channels != rdev->audio_channels;
changes |= rate != rdev->audio_rate;
changes |= bps != rdev->audio_bits_per_sample;
changes |= status_bits != rdev->audio_status_bits;
changes |= category_code != rdev->audio_category_code;
 
if (changes) {
rdev->audio_channels = channels;
rdev->audio_rate = rate;
rdev->audio_bits_per_sample = bps;
rdev->audio_status_bits = status_bits;
rdev->audio_category_code = category_code;
if (rdev->audio_status.channels != audio_status.channels ||
rdev->audio_status.rate != audio_status.rate ||
rdev->audio_status.bits_per_sample != audio_status.bits_per_sample ||
rdev->audio_status.status_bits != audio_status.status_bits ||
rdev->audio_status.category_code != audio_status.category_code) {
rdev->audio_status = audio_status;
changed = true;
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
still_going |= radeon_encoder->audio_polling_active;
if (changes || r600_hdmi_buffer_status_changed(encoder))
if (!radeon_dig_encoder(encoder))
continue;
if (changed || r600_hdmi_buffer_status_changed(encoder))
r600_hdmi_update_audio_settings(encoder);
}
 
// mod_timer(&rdev->audio_timer,
// jiffies + msecs_to_jiffies(AUDIO_TIMER_INTERVALL));
}
 
/*
152,13 → 149,23
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
u32 value = 0;
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
value |= 0x0e1000f0; /* fglrx sets that too */
}
WREG32(EVERGREEN_AUDIO_ENABLE, value);
} else {
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
rdev->audio_enabled = enable;
}
 
/*
* initialize the audio vars and register the update timer
* initialize the audio vars
*/
int r600_audio_init(struct radeon_device *rdev)
{
167,19 → 174,12
 
r600_audio_engine_enable(rdev, true);
 
rdev->audio_channels = -1;
rdev->audio_rate = -1;
rdev->audio_bits_per_sample = -1;
rdev->audio_status_bits = 0;
rdev->audio_category_code = 0;
rdev->audio_status.channels = -1;
rdev->audio_status.rate = -1;
rdev->audio_status.bits_per_sample = -1;
rdev->audio_status.status_bits = 0;
rdev->audio_status.category_code = 0;
 
// setup_timer(
// &rdev->audio_timer,
// r600_audio_update_hdmi,
// (unsigned long)rdev);
 
// mod_timer(&rdev->audio_timer, jiffies + 1);
 
return 0;
}
 
192,6 → 192,7
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
int base_rate = 48000;
 
switch (radeon_encoder->encoder_id) {
211,6 → 212,15
return;
}
 
if (ASIC_IS_DCE4(rdev)) {
/* TODO: other PLLs? */
WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
 
/* Select DTO source */
WREG32(0x5ac, radeon_crtc->crtc_id);
} else {
switch (dig->dig_encoder) {
case 0:
WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
224,11 → 234,13
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
break;
default:
dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
dev_err(rdev->dev,
"Unsupported DIG on encoder 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
}
}
 
/*
* release the audio timer
239,7 → 251,5
if (!rdev->audio_enabled)
return;
 
// del_timer(&rdev->audio_timer);
 
r600_audio_engine_enable(rdev, false);
}
/drivers/video/drm/radeon/r600_blit_kms.c
23,30 → 23,20
*
*/
 
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
 
#include "r600d.h"
#include "r600_blit_shaders.h"
#include "radeon_blit_common.h"
 
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
 
#define FMT_8 0x1
#define FMT_5_6_5 0x8
#define FMT_8_8_8_8 0x1a
#define COLOR_8 0x1
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
 
/* emits 21 on rv770+, 23 on r600 */
static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
 
54,42 → 44,44
if (h < 8)
h = 8;
 
cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
cb_color_info = CB_FORMAT(format) |
CB_SOURCE_FORMAT(CB_SF_EXPORT_NORM) |
CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
 
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
radeon_ring_write(rdev, 2 << 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
radeon_ring_write(ring, 2 << 0);
}
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (pitch << 0) | (slice << 10));
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, cb_color_info);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, cb_color_info);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
}
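/* Editor's note: a worked example of the pitch/slice encoding above for an
 * assumed 384x256 render target: pitch = (384 / 8) - 1 = 47 (width in
 * 8-pixel units, minus one as the hardware expects) and
 * slice = (384 * 256 / 64) - 1 = 1535 (area in 64-pixel tiles, minus one). */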
 
/* emits 5dw */
98,6 → 90,7
u32 sync_type, u32 size,
u64 mc_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
 
if (size == 0xffffffff)
105,11 → 98,11
else
cp_coher_size = ((size + 255) >> 8);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(rdev, sync_type);
radeon_ring_write(rdev, cp_coher_size);
radeon_ring_write(rdev, mc_addr >> 8);
radeon_ring_write(rdev, 10); /* poll interval */
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, sync_type);
radeon_ring_write(ring, cp_coher_size);
radeon_ring_write(ring, mc_addr >> 8);
radeon_ring_write(ring, 10); /* poll interval */
}
 
/* emits 21dw + 1 surface sync = 26dw */
116,6 → 109,7
static void
set_shaders(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
u32 sq_pgm_resources;
 
124,35 → 118,35
 
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, sq_pgm_resources);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_pgm_resources);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
 
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, gpu_addr >> 8);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 2);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 2);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, 0);
 
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
162,22 → 156,24
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2;
 
sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
SQ_VTXC_STRIDE(16);
#ifdef __BIG_ENDIAN
sq_vtx_constant_word2 |= (2 << 30);
sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(rdev, 0x460);
radeon_ring_write(rdev, gpu_addr & 0xffffffff);
radeon_ring_write(rdev, 48 - 1);
radeon_ring_write(rdev, sq_vtx_constant_word2);
radeon_ring_write(rdev, 1 << 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(ring, 0x460);
radeon_ring_write(ring, gpu_addr & 0xffffffff);
radeon_ring_write(ring, 48 - 1);
radeon_ring_write(ring, sq_vtx_constant_word2);
radeon_ring_write(ring, 1 << 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
 
if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||
195,35 → 191,40
static void
set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr)
u64 gpu_addr, u32 size)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
 
if (h < 1)
h = 1;
 
sq_tex_resource_word0 = (1 << 0) | (1 << 3);
sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
((w - 1) << 19));
sq_tex_resource_word0 = S_038000_DIM(V_038000_SQ_TEX_DIM_2D) |
S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
sq_tex_resource_word0 |= S_038000_PITCH((pitch >> 3) - 1) |
S_038000_TEX_WIDTH(w - 1);
 
sq_tex_resource_word1 = (format << 26);
sq_tex_resource_word1 |= ((h - 1) << 0);
sq_tex_resource_word1 = S_038004_DATA_FORMAT(format);
sq_tex_resource_word1 |= S_038004_TEX_HEIGHT(h - 1);
 
sq_tex_resource_word4 = ((1 << 14) |
(0 << 16) |
(1 << 19) |
(2 << 22) |
(3 << 25));
sq_tex_resource_word4 = S_038010_REQUEST_SIZE(1) |
S_038010_DST_SEL_X(SQ_SEL_X) |
S_038010_DST_SEL_Y(SQ_SEL_Y) |
S_038010_DST_SEL_Z(SQ_SEL_Z) |
S_038010_DST_SEL_W(SQ_SEL_W);
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, sq_tex_resource_word0);
radeon_ring_write(rdev, sq_tex_resource_word1);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, gpu_addr >> 8);
radeon_ring_write(rdev, sq_tex_resource_word4);
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, sq_tex_resource_word0);
radeon_ring_write(ring, sq_tex_resource_word1);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, gpu_addr >> 8);
radeon_ring_write(ring, sq_tex_resource_word4);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
}
 
/* emits 12 */
231,20 → 232,21
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
 
/* emits 10 */
251,23 → 253,24
static void
draw_auto(struct radeon_device *rdev)
{
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, DI_PT_RECTLIST);
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, DI_PT_RECTLIST);
 
radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
 
radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(rdev, 1);
radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
radeon_ring_write(ring, 1);
 
radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(rdev, 3);
radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
radeon_ring_write(ring, 3);
radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
 
}
 
275,6 → 278,7
static void
set_default_state(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
430,51 → 434,26
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(gpu_addr & 0xFFFFFFFC));
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(ring, dwords);
 
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(rdev, sq_config);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
radeon_ring_write(rdev, sq_thread_resource_mgmt);
radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
radeon_ring_write(ring, sq_config);
radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
radeon_ring_write(ring, sq_thread_resource_mgmt);
radeon_ring_write(ring, sq_stack_resource_mgmt_1);
radeon_ring_write(ring, sq_stack_resource_mgmt_2);
}
 
static inline uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;
 
if ((input & 0x3fff) == 0)
result = 0; /* 0 is a special case */
else {
exponent = 140; /* exponent biased by 127; */
fraction = (input & 0x3fff) << 10; /* cheat and only
handle numbers below 2^14 (the 0x3fff mask) */
for (i = 0; i < 14; i++) {
if (fraction & 0x800000)
break;
else {
fraction = fraction << 1; /* keep
shifting left until top bit = 1 */
exponent = exponent - 1;
}
}
result = exponent << 23 | (fraction & 0x7fffff); /* mask
off top bit; assumed 1 */
}
return result;
}
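/* Editor's note: a worked trace of the i2f() bit trick above for input = 4:
 * fraction = 4 << 10 = 0x1000; eleven left shifts put the leading one at
 * bit 23 (0x800000) and drop the exponent from 140 to 129, so
 * result = (129 << 23) | 0 = 0x40800000, the IEEE-754 encoding of 4.0f
 * (exponent 129 - 127 = 2, implicit-one mantissa). The int2float() helper
 * declared in r600_blit_shaders.h that replaces it should produce the same
 * encoding. */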
 
int r600_blit_init(struct radeon_device *rdev)
{
u32 obj_size;
483,11 → 462,27
u32 packet2s[16];
int num_packet2s = 0;
 
/* pin copy shader into vram if already initialized */
if (rdev->r600_blit.shader_obj)
goto done;
rdev->r600_blit.primitives.set_render_target = set_render_target;
rdev->r600_blit.primitives.cp_set_surface_sync = cp_set_surface_sync;
rdev->r600_blit.primitives.set_shaders = set_shaders;
rdev->r600_blit.primitives.set_vtx_resource = set_vtx_resource;
rdev->r600_blit.primitives.set_tex_resource = set_tex_resource;
rdev->r600_blit.primitives.set_scissors = set_scissors;
rdev->r600_blit.primitives.draw_auto = draw_auto;
rdev->r600_blit.primitives.set_default_state = set_default_state;
 
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.ring_size_common = 8; /* sync semaphore */
rdev->r600_blit.ring_size_common += 40; /* shaders + def state */
rdev->r600_blit.ring_size_common += 5; /* done copy */
rdev->r600_blit.ring_size_common += 16; /* fence emit for done copy */
 
rdev->r600_blit.ring_size_per_loop = 76;
/* set_render_target emits 2 extra dwords on rv6xx */
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
rdev->r600_blit.ring_size_per_loop += 2;
 
rdev->r600_blit.max_dim = 8192;
 
rdev->r600_blit.state_offset = 0;
 
if (rdev->family >= CHIP_RV770)
512,13 → 507,28
obj_size += r6xx_ps_size * 4;
obj_size = ALIGN(obj_size, 256);
 
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_obj);
/* pin copy shader into vram if not already initialized */
if (rdev->r600_blit.shader_obj == NULL) {
r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->r600_blit.shader_obj);
if (r) {
DRM_ERROR("r600 failed to allocate shader\n");
return r;
}
 
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
}
 
DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n",
obj_size,
rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
547,20 → 557,7
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 
done:
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->r600_blit.shader_gpu_addr);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
if (r) {
dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
return r;
}
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
 
 
return 0;
}
 
582,263 → 579,176
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
 
static int r600_vb_ib_get(struct radeon_device *rdev)
static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int *width, int *height, int max_dim)
{
int r;
r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}
unsigned max_pages;
unsigned pages = num_gpu_pages;
int w, h;
 
rdev->r600_blit.vb_total = 64*1024;
rdev->r600_blit.vb_used = 0;
return 0;
if (num_gpu_pages == 0) {
/* not supposed to be called with no pages, but just in case */
h = 0;
w = 0;
pages = 0;
WARN_ON(1);
} else {
int rect_order = 2;
h = RECT_UNIT_H;
while (num_gpu_pages / rect_order) {
h *= 2;
rect_order *= 4;
if (h >= max_dim) {
h = max_dim;
break;
}
}
max_pages = (max_dim * h) / (RECT_UNIT_W * RECT_UNIT_H);
if (pages > max_pages)
pages = max_pages;
w = (pages * RECT_UNIT_W * RECT_UNIT_H) / h;
w = (w / RECT_UNIT_W) * RECT_UNIT_W;
pages = (w * h) / (RECT_UNIT_W * RECT_UNIT_H);
BUG_ON(pages == 0);
}
 
static void r600_vb_ib_put(struct radeon_device *rdev)
{
radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
 
DRM_DEBUG("blit_rectangle: h=%d, w=%d, pages=%d\n", h, w, pages);
 
/* return width and height only if the caller wants them */
if (height)
*height = h;
if (width)
*width = w;
 
return pages;
}
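/* Editor's note: a worked pass through the sizing loop above, assuming
 * RECT_UNIT_W = RECT_UNIT_H = 32 from radeon_blit_common.h (one 4 KiB GPU
 * page = one 32x32 rect at 32 bpp). For num_gpu_pages = 100 and
 * max_dim = 8192: h grows 32 -> 64 -> 128 -> 256 while rect_order reaches
 * 128 (100 / 128 == 0 ends the loop); then w = (100 * 1024) / 256 = 400,
 * rounded down to 384, so the call returns 96 pages as a 384x256 rect and
 * the caller loops again for the remaining 4. */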
 
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
 
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_fence **fence, struct radeon_sa_bo **vb,
struct radeon_semaphore **sem)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
int ring_size, line_size;
int max_size;
/* loops of emits 64 + fence emit possible */
int dwords_per_loop = 76, num_loops;
int ring_size;
int num_loops = 0;
int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
 
r = r600_vb_ib_get(rdev);
if (r)
/* num loops */
while (num_gpu_pages) {
num_gpu_pages -=
r600_blit_create_rect(num_gpu_pages, NULL, NULL,
rdev->r600_blit.max_dim);
num_loops++;
}
 
/* 48 bytes for vertex per loop */
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, vb,
(num_loops*48)+256, 256, true);
if (r) {
return r;
}
 
/* set_render_target emits 2 extra dwords on rv6xx */
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
dwords_per_loop += 2;
r = radeon_semaphore_create(rdev, sem);
if (r) {
radeon_sa_bo_free(rdev, vb, NULL);
return r;
}
 
/* 8 bpp vs 32 bpp for xfer unit */
if (size_bytes & 3)
line_size = 8192;
else
line_size = 8192*4;
 
max_size = 8192 * line_size;
 
/* major loops cover the max size transfer */
num_loops = ((size_bytes + max_size) / max_size);
/* minor loops cover the extra non aligned bits */
num_loops += ((size_bytes % line_size) ? 1 : 0);
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
ring_size += 10; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
if (r)
ring_size += rdev->r600_blit.ring_size_common;
r = radeon_ring_lock(rdev, ring, ring_size);
if (r) {
radeon_sa_bo_free(rdev, vb, NULL);
radeon_semaphore_free(rdev, sem, NULL);
return r;
}
 
set_default_state(rdev); /* 14 */
set_shaders(rdev); /* 26 */
if (radeon_fence_need_sync(*fence, RADEON_RING_TYPE_GFX_INDEX)) {
radeon_semaphore_sync_rings(rdev, *sem, (*fence)->ring,
RADEON_RING_TYPE_GFX_INDEX);
radeon_fence_note_sync(*fence, RADEON_RING_TYPE_GFX_INDEX);
} else {
radeon_semaphore_free(rdev, sem, NULL);
}
 
rdev->r600_blit.primitives.set_default_state(rdev);
rdev->r600_blit.primitives.set_shaders(rdev);
return 0;
}
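/* Editor's note: the ring budget above in numbers. ring_size_common is
 * 8 + 40 + 5 + 16 = 69 dwords (set up in r600_blit_init() above), so a copy
 * that r600_blit_create_rect() splits into two rects locks
 * 2 * 76 + 69 = 221 dwords on r600, plus two more per loop on rv6xx. */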
 
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
struct radeon_sa_bo *vb, struct radeon_semaphore *sem)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
if (rdev->r600_blit.vb_ib)
r600_vb_ib_put(rdev);
r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return;
}
 
if (fence)
r = radeon_fence_emit(rdev, fence);
 
radeon_ring_unlock_commit(rdev);
radeon_ring_unlock_commit(rdev, ring);
radeon_sa_bo_free(rdev, &vb, *fence);
radeon_semaphore_free(rdev, &sem, *fence);
}
 
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes)
unsigned num_gpu_pages,
struct radeon_sa_bo *vb)
{
int max_bytes;
u64 vb_gpu_addr;
u32 *vb;
u32 *vb_cpu_addr;
 
DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
size_bytes, rdev->r600_blit.vb_used);
vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
max_bytes = 8192;
DRM_DEBUG("emitting copy %16llx %16llx %d\n",
src_gpu_addr, dst_gpu_addr, num_gpu_pages);
vb_cpu_addr = (u32 *)radeon_sa_bo_cpu_addr(vb);
vb_gpu_addr = radeon_sa_bo_gpu_addr(vb);
 
while (size_bytes) {
int cur_size = size_bytes;
int src_x = src_gpu_addr & 255;
int dst_x = dst_gpu_addr & 255;
int h = 1;
src_gpu_addr = src_gpu_addr & ~255ULL;
dst_gpu_addr = dst_gpu_addr & ~255ULL;
while (num_gpu_pages) {
int w, h;
unsigned size_in_bytes;
unsigned pages_per_loop =
r600_blit_create_rect(num_gpu_pages, &w, &h,
rdev->r600_blit.max_dim);
 
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
size_in_bytes = pages_per_loop * RADEON_GPU_PAGE_SIZE;
DRM_DEBUG("rectangle w=%d h=%d\n", w, h);
 
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
// WARN_ON(1);
}
vb_cpu_addr[0] = 0;
vb_cpu_addr[1] = 0;
vb_cpu_addr[2] = 0;
vb_cpu_addr[3] = 0;
 
vb[0] = i2f(dst_x);
vb[1] = 0;
vb[2] = i2f(src_x);
vb[3] = 0;
vb_cpu_addr[4] = 0;
vb_cpu_addr[5] = int2float(h);
vb_cpu_addr[6] = 0;
vb_cpu_addr[7] = int2float(h);
 
vb[4] = i2f(dst_x);
vb[5] = i2f(h);
vb[6] = i2f(src_x);
vb[7] = i2f(h);
vb_cpu_addr[8] = int2float(w);
vb_cpu_addr[9] = int2float(h);
vb_cpu_addr[10] = int2float(w);
vb_cpu_addr[11] = int2float(h);
 
vb[8] = i2f(dst_x + cur_size);
vb[9] = i2f(h);
vb[10] = i2f(src_x + cur_size);
vb[11] = i2f(h);
 
/* src 9 */
set_tex_resource(rdev, FMT_8,
src_x + cur_size, h, src_x + cur_size,
src_gpu_addr);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
 
/* dst 23 */
set_render_target(rdev, COLOR_8,
dst_x + cur_size, h,
dst_gpu_addr);
 
/* scissors 12 */
set_scissors(rdev, dst_x, 0, dst_x + cur_size, h);
 
/* 14 */
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
set_vtx_resource(rdev, vb_gpu_addr);
 
/* draw 10 */
draw_auto(rdev);
 
/* 5 */
cp_set_surface_sync(rdev,
rdev->r600_blit.primitives.set_tex_resource(rdev, FMT_8_8_8_8,
w, h, w, src_gpu_addr, size_in_bytes);
rdev->r600_blit.primitives.set_render_target(rdev, COLOR_8_8_8_8,
w, h, dst_gpu_addr);
rdev->r600_blit.primitives.set_scissors(rdev, 0, 0, w, h);
rdev->r600_blit.primitives.set_vtx_resource(rdev, vb_gpu_addr);
rdev->r600_blit.primitives.draw_auto(rdev);
rdev->r600_blit.primitives.cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
size_in_bytes, dst_gpu_addr);
 
vb += 12;
rdev->r600_blit.vb_used += 12 * 4;
 
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
vb_cpu_addr += 12;
vb_gpu_addr += 4*12;
src_gpu_addr += size_in_bytes;
dst_gpu_addr += size_in_bytes;
num_gpu_pages -= pages_per_loop;
}
} else {
max_bytes = 8192 * 4;
 
while (size_bytes) {
int cur_size = size_bytes;
int src_x = (src_gpu_addr & 255);
int dst_x = (dst_gpu_addr & 255);
int h = 1;
src_gpu_addr = src_gpu_addr & ~255ULL;
dst_gpu_addr = dst_gpu_addr & ~255ULL;
 
if (!src_x && !dst_x) {
h = (cur_size / max_bytes);
if (h > 8192)
h = 8192;
if (h == 0)
h = 1;
else
cur_size = max_bytes;
} else {
if (cur_size > max_bytes)
cur_size = max_bytes;
if (cur_size > (max_bytes - dst_x))
cur_size = (max_bytes - dst_x);
if (cur_size > (max_bytes - src_x))
cur_size = (max_bytes - src_x);
}
 
if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
// WARN_ON(1);
}
 
vb[0] = i2f(dst_x / 4);
vb[1] = 0;
vb[2] = i2f(src_x / 4);
vb[3] = 0;
 
vb[4] = i2f(dst_x / 4);
vb[5] = i2f(h);
vb[6] = i2f(src_x / 4);
vb[7] = i2f(h);
 
vb[8] = i2f((dst_x + cur_size) / 4);
vb[9] = i2f(h);
vb[10] = i2f((src_x + cur_size) / 4);
vb[11] = i2f(h);
 
/* src 9 */
set_tex_resource(rdev, FMT_8_8_8_8,
(src_x + cur_size) / 4,
h, (src_x + cur_size) / 4,
src_gpu_addr);
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr);
 
/* dst 23 */
set_render_target(rdev, COLOR_8_8_8_8,
(dst_x + cur_size) / 4, h,
dst_gpu_addr);
 
/* scissors 12 */
set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h);
 
/* Vertex buffer setup 14 */
vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used;
set_vtx_resource(rdev, vb_gpu_addr);
 
/* draw 10 */
draw_auto(rdev);
 
/* 5 */
cp_set_surface_sync(rdev,
PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA,
cur_size * h, dst_gpu_addr);
 
/* 78 ring dwords per loop */
vb += 12;
rdev->r600_blit.vb_used += 12 * 4;
 
src_gpu_addr += cur_size * h;
dst_gpu_addr += cur_size * h;
size_bytes -= cur_size * h;
}
}
}
 
 
 
/drivers/video/drm/radeon/r600_blit_shaders.c
314,6 → 314,10
0x00000000, /* VGT_VTX_CNT_EN */
 
0xc0016900,
0x000000d4,
0x00000000, /* SX_MISC */
 
0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
 
626,6 → 630,10
0x00000000, /* VGT_VTX_CNT_EN */
 
0xc0016900,
0x000000d4,
0x00000000, /* SX_MISC */
 
0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
 
/drivers/video/drm/radeon/r600_blit_shaders.h
35,4 → 35,5
extern const u32 r6xx_ps_size, r6xx_vs_size;
extern const u32 r6xx_default_size, r7xx_default_size;
 
__pure uint32_t int2float(uint32_t x);
#endif
/drivers/video/drm/radeon/r600_hdmi.c
23,10 → 23,11
*
* Authors: Christian König
*/
#include "drmP.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "atom.h"
 
/*
52,19 → 53,7
AUDIO_STATUS_LEVEL = 0x80
};
 
struct {
uint32_t Clock;
 
int N_32kHz;
int CTS_32kHz;
 
int N_44_1kHz;
int CTS_44_1kHz;
 
int N_48kHz;
int CTS_48kHz;
 
} r600_hdmi_ACR[] = {
static const struct radeon_hdmi_acr r600_hdmi_predefined_acr[] = {
/* 32kHz 44.1kHz 48kHz */
/* Clock N CTS N CTS N CTS */
{ 25174, 4576, 28125, 7007, 31250, 6864, 28125 }, /* 25,20/1.001 MHz */
83,7 → 72,7
/*
* calculate CTS value if it's not found in the table
*/
static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
static void r600_hdmi_calc_cts(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
*CTS = clock * N / (128 * freq) * 1000;
91,6 → 80,24
N, *CTS, freq);
}
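
/*
 * A worked example of the fallback formula above (illustrative numbers,
 * not taken from the table): for a 27.000 MHz pixel clock with the common
 * N = 4096 for 32 kHz audio,
 *
 *   CTS = 27000 * 4096 / (128 * 32000) * 1000 = 27 * 1000 = 27000,
 *
 * which matches the CTS value recommended by the HDMI spec for that mode.
 */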
 
struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock)
{
struct radeon_hdmi_acr res;
u8 i;
 
for (i = 0; r600_hdmi_predefined_acr[i].clock != clock &&
r600_hdmi_predefined_acr[i].clock != 0; i++)
;
res = r600_hdmi_predefined_acr[i];
 
/* In case some CTS are missing */
r600_hdmi_calc_cts(clock, &res.cts_32khz, res.n_32khz, 32000);
r600_hdmi_calc_cts(clock, &res.cts_44_1khz, res.n_44_1khz, 44100);
r600_hdmi_calc_cts(clock, &res.cts_48khz, res.n_48khz, 48000);
 
return res;
}
 
/*
* update the N and CTS parameters for a given pixel clock rate
*/
98,30 → 105,19
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
int CTS;
int N;
int i;
struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
 
for (i = 0; r600_hdmi_ACR[i].Clock != clock && r600_hdmi_ACR[i].Clock != 0; i++);
WREG32(HDMI0_ACR_32_0 + offset, HDMI0_ACR_CTS_32(acr.cts_32khz));
WREG32(HDMI0_ACR_32_1 + offset, acr.n_32khz);
 
CTS = r600_hdmi_ACR[i].CTS_32kHz;
N = r600_hdmi_ACR[i].N_32kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 32000);
WREG32(offset+R600_HDMI_32kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_32kHz_N, N);
WREG32(HDMI0_ACR_44_0 + offset, HDMI0_ACR_CTS_44(acr.cts_44_1khz));
WREG32(HDMI0_ACR_44_1 + offset, acr.n_44_1khz);
 
CTS = r600_hdmi_ACR[i].CTS_44_1kHz;
N = r600_hdmi_ACR[i].N_44_1kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 44100);
WREG32(offset+R600_HDMI_44_1kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_44_1kHz_N, N);
 
CTS = r600_hdmi_ACR[i].CTS_48kHz;
N = r600_hdmi_ACR[i].N_48kHz;
r600_hdmi_calc_CTS(clock, &CTS, N, 48000);
WREG32(offset+R600_HDMI_48kHz_CTS, CTS << 12);
WREG32(offset+R600_HDMI_48kHz_N, N);
WREG32(HDMI0_ACR_48_0 + offset, HDMI0_ACR_CTS_48(acr.cts_48khz));
WREG32(HDMI0_ACR_48_1 + offset, acr.n_48khz);
}
 
/*
165,7 → 161,9
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
 
uint8_t frame[14];
 
196,14 → 194,21
frame[0xD] = (right_bar >> 8);
 
r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
/* Our header values (type, version, length) should be alright, Intel
* is using the same. Checksum function also seems to be OK, it works
* fine for audio infoframe. However calculated value is always lower
* by 2 in comparison to fglrx. It breaks displaying anything in case
* of TVs that strictly check the checksum. Hack it manually here to
* workaround this issue. */
frame[0x0] += 2;
 
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
WREG32(HDMI0_AVI_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_1,
WREG32(HDMI0_AVI_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24));
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_2,
WREG32(HDMI0_AVI_INFO2 + offset,
frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24));
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_3,
WREG32(HDMI0_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8));
}
 
224,7 → 229,9
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
 
uint8_t frame[11];
 
242,9 → 249,9
 
r600_hdmi_infoframe_checksum(0x84, 0x01, 0x0A, frame);
 
WREG32(offset+R600_HDMI_AUDIOINFOFRAME_0,
WREG32(HDMI0_AUDIO_INFO0 + offset,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
WREG32(offset+R600_HDMI_AUDIOINFOFRAME_1,
WREG32(HDMI0_AUDIO_INFO1 + offset,
frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x8] << 24));
}
 
251,13 → 258,15
/*
* test if audio buffer is filled enough to start playing
*/
static int r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
static bool r600_hdmi_is_audio_buffer_filled(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
 
return (RREG32(offset+R600_HDMI_STATUS) & 0x10) != 0;
return (RREG32(HDMI0_STATUS + offset) & 0x10) != 0;
}
 
/*
266,14 → 275,15
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int status, result;
 
if (!radeon_encoder->hdmi_offset)
if (!dig->afmt || !dig->afmt->enabled)
return 0;
 
status = r600_hdmi_is_audio_buffer_filled(encoder);
result = radeon_encoder->hdmi_buffer_status != status;
radeon_encoder->hdmi_buffer_status = status;
result = dig->afmt->last_buffer_filled_status != status;
dig->afmt->last_buffer_filled_status = status;
 
return result;
}
281,27 → 291,24
/*
* write the audio workaround status to the hardware
*/
void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
static void r600_hdmi_audio_workaround(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset = radeon_encoder->hdmi_offset;
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset = dig->afmt->offset;
bool hdmi_audio_workaround = false; /* FIXME */
u32 value;
 
if (!offset)
return;
 
if (!radeon_encoder->hdmi_audio_workaround ||
r600_hdmi_is_audio_buffer_filled(encoder)) {
 
/* disable audio workaround */
WREG32_P(offset+R600_HDMI_CNTL, 0x00000001, ~0x00001001);
 
} else {
/* enable audio workaround */
WREG32_P(offset+R600_HDMI_CNTL, 0x00001001, ~0x00001001);
if (!hdmi_audio_workaround ||
r600_hdmi_is_audio_buffer_filled(encoder))
value = 0; /* disable workaround */
else
value = HDMI0_AUDIO_TEST_EN; /* enable workaround */
WREG32_P(HDMI0_AUDIO_PACKET_CONTROL + offset,
value, ~HDMI0_AUDIO_TEST_EN);
}
}
 
 
/*
311,41 → 318,74
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
 
if (ASIC_IS_DCE4(rdev))
/* Silent, r600_hdmi_enable will raise WARN for us */
if (!dig->afmt->enabled)
return;
offset = dig->afmt->offset;
 
if (!offset)
return;
// r600_audio_set_clock(encoder, mode->clock);
 
r600_audio_set_clock(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND); /* send null packets when required */
 
WREG32(offset+R600_HDMI_UNKNOWN_0, 0x1000);
WREG32(offset+R600_HDMI_UNKNOWN_1, 0x0);
WREG32(offset+R600_HDMI_UNKNOWN_2, 0x1000);
WREG32(HDMI0_AUDIO_CRC_CONTROL + offset, 0x1000);
 
r600_hdmi_update_ACR(encoder, mode->clock);
if (ASIC_IS_DCE32(rdev)) {
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
HDMI0_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
AFMT_AUDIO_SAMPLE_SEND | /* send audio packets */
AFMT_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
} else {
WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */
HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
}
 
WREG32(offset+R600_HDMI_VIDEOCNTL, 0x13);
WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
HDMI0_ACR_AUTO_SEND | /* allow hw to send ACR packets when required */
HDMI0_ACR_SOURCE); /* select SW CTS value */
 
WREG32(offset+R600_HDMI_VERSION, 0x202);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
HDMI0_NULL_SEND | /* send null packets when required */
HDMI0_GC_SEND | /* send general control packets */
HDMI0_GC_CONT); /* send general control packets every frame */
 
/* TODO: HDMI0_AUDIO_INFO_UPDATE */
WREG32(HDMI0_INFOFRAME_CONTROL0 + offset,
HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
HDMI0_AVI_INFO_CONT | /* send AVI info frames every frame/field */
HDMI0_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
HDMI0_AUDIO_INFO_CONT); /* send audio info frames every frame/field */
 
WREG32(HDMI0_INFOFRAME_CONTROL1 + offset,
HDMI0_AVI_INFO_LINE(2) | /* anything other than 0 */
HDMI0_AUDIO_INFO_LINE(2)); /* anything other than 0 */
 
WREG32(HDMI0_GC + offset, 0); /* unset HDMI0_GC_AVMUTE */
 
r600_hdmi_videoinfoframe(encoder, RGB, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
 
r600_hdmi_update_ACR(encoder, mode->clock);
 
/* it's unknown what these bits do exactly, but it's indeed quite useful for debugging */
WREG32(offset+R600_HDMI_AUDIO_DEBUG_0, 0x00FFFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_1, 0x007FFFFF);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_2, 0x00000001);
WREG32(offset+R600_HDMI_AUDIO_DEBUG_3, 0x00000001);
WREG32(HDMI0_RAMP_CONTROL0 + offset, 0x00FFFFFF);
WREG32(HDMI0_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(HDMI0_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
 
r600_hdmi_audio_workaround(encoder);
 
/* audio packets per line, does anyone know how to calc this ? */
WREG32_P(offset+R600_HDMI_CNTL, 0x00040000, ~0x001F0000);
}
 
#if 0
/*
* update settings with current parameters from audio engine
*/
353,127 → 393,83
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
 
int channels = r600_audio_channels(rdev);
int rate = r600_audio_rate(rdev);
int bps = r600_audio_bits_per_sample(rdev);
uint8_t status_bits = r600_audio_status_bits(rdev);
uint8_t category_code = r600_audio_category_code(rdev);
 
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct r600_audio audio = r600_audio_status(rdev);
uint32_t offset;
uint32_t iec;
 
if (!offset)
if (!dig->afmt || !dig->afmt->enabled)
return;
offset = dig->afmt->offset;
 
DRM_DEBUG("%s with %d channels, %d Hz sampling rate, %d bits per sample,\n",
r600_hdmi_is_audio_buffer_filled(encoder) ? "playing" : "stopped",
channels, rate, bps);
audio.channels, audio.rate, audio.bits_per_sample);
DRM_DEBUG("0x%02X IEC60958 status bits and 0x%02X category code\n",
(int)status_bits, (int)category_code);
(int)audio.status_bits, (int)audio.category_code);
 
iec = 0;
if (status_bits & AUDIO_STATUS_PROFESSIONAL)
if (audio.status_bits & AUDIO_STATUS_PROFESSIONAL)
iec |= 1 << 0;
if (status_bits & AUDIO_STATUS_NONAUDIO)
if (audio.status_bits & AUDIO_STATUS_NONAUDIO)
iec |= 1 << 1;
if (status_bits & AUDIO_STATUS_COPYRIGHT)
if (audio.status_bits & AUDIO_STATUS_COPYRIGHT)
iec |= 1 << 2;
if (status_bits & AUDIO_STATUS_EMPHASIS)
if (audio.status_bits & AUDIO_STATUS_EMPHASIS)
iec |= 1 << 3;
 
iec |= category_code << 8;
iec |= HDMI0_60958_CS_CATEGORY_CODE(audio.category_code);
 
switch (rate) {
case 32000: iec |= 0x3 << 24; break;
case 44100: iec |= 0x0 << 24; break;
case 88200: iec |= 0x8 << 24; break;
case 176400: iec |= 0xc << 24; break;
case 48000: iec |= 0x2 << 24; break;
case 96000: iec |= 0xa << 24; break;
case 192000: iec |= 0xe << 24; break;
switch (audio.rate) {
case 32000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x3);
break;
case 44100:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x0);
break;
case 48000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x2);
break;
case 88200:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0x8);
break;
case 96000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xa);
break;
case 176400:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xc);
break;
case 192000:
iec |= HDMI0_60958_CS_SAMPLING_FREQUENCY(0xe);
break;
}
 
WREG32(offset+R600_HDMI_IEC60958_1, iec);
WREG32(HDMI0_60958_0 + offset, iec);
 
iec = 0;
switch (bps) {
case 16: iec |= 0x2; break;
case 20: iec |= 0x3; break;
case 24: iec |= 0xb; break;
}
if (status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
 
WREG32_P(offset+R600_HDMI_IEC60958_2, iec, ~0x5000f);
 
/* 0x021 or 0x031 sets the audio frame length */
WREG32(offset+R600_HDMI_AUDIOCNTL, 0x31);
r600_hdmi_audioinfoframe(encoder, channels-1, 0, 0, 0, 0, 0, 0, 0);
 
r600_hdmi_audio_workaround(encoder);
}
 
static int r600_hdmi_find_free_block(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
bool free_blocks[3] = { true, true, true };
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->hdmi_offset) {
case R600_HDMI_BLOCK1:
free_blocks[0] = false;
switch (audio.bits_per_sample) {
case 16:
iec |= HDMI0_60958_CS_WORD_LENGTH(0x2);
break;
case R600_HDMI_BLOCK2:
free_blocks[1] = false;
case 20:
iec |= HDMI0_60958_CS_WORD_LENGTH(0x3);
break;
case R600_HDMI_BLOCK3:
free_blocks[2] = false;
case 24:
iec |= HDMI0_60958_CS_WORD_LENGTH(0xb);
break;
}
}
if (audio.status_bits & AUDIO_STATUS_V)
iec |= 0x5 << 16;
WREG32_P(HDMI0_60958_1 + offset, iec, ~0x5000f);
 
if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690 ||
rdev->family == CHIP_RS740) {
return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
} else if (rdev->family >= CHIP_R600) {
if (free_blocks[0])
return R600_HDMI_BLOCK1;
else if (free_blocks[1])
return R600_HDMI_BLOCK2;
}
return 0;
}
r600_hdmi_audioinfoframe(encoder, audio.channels - 1, 0, 0, 0, 0, 0, 0,
0);
 
static void r600_hdmi_assign_block(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
if (!dig) {
dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
r600_hdmi_audio_workaround(encoder);
}
#endif
 
if (ASIC_IS_DCE4(rdev)) {
/* TODO */
} else if (ASIC_IS_DCE3(rdev)) {
radeon_encoder->hdmi_offset = dig->dig_encoder ?
R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
if (ASIC_IS_DCE32(rdev))
radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
} else if (rdev->family >= CHIP_R600 || rdev->family == CHIP_RS600 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
}
}
 
/*
* enable the HDMI engine
*/
482,56 → 478,56
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
u32 hdmi;
 
if (ASIC_IS_DCE4(rdev))
if (ASIC_IS_DCE6(rdev))
return;
 
if (!radeon_encoder->hdmi_offset) {
r600_hdmi_assign_block(encoder);
if (!radeon_encoder->hdmi_offset) {
dev_warn(rdev->dev, "Could not find HDMI block for "
"0x%x encoder\n", radeon_encoder->encoder_id);
/* Silent, r600_hdmi_enable will raise WARN for us */
if (dig->afmt->enabled)
return;
}
}
offset = dig->afmt->offset;
 
offset = radeon_encoder->hdmi_offset;
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
/* Older chipsets require setting HDMI and routing manually */
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
hdmi = HDMI0_ERROR_ACK | HDMI0_ENABLE;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0x101);
WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
~AVIVO_TMDSA_CNTL_HDMI_EN);
hdmi |= HDMI0_STREAM(HDMI0_STREAM_TMDSA);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0x105);
WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
~AVIVO_LVTMA_CNTL_HDMI_EN);
hdmi |= HDMI0_STREAM(HDMI0_STREAM_LVTMA);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
WREG32_P(DDIA_CNTL, DDIA_HDMI_EN, ~DDIA_HDMI_EN);
hdmi |= HDMI0_STREAM(HDMI0_STREAM_DDIA);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
hdmi |= HDMI0_STREAM(HDMI0_STREAM_DVOA);
break;
default:
dev_err(rdev->dev, "Unknown HDMI output type\n");
dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
radeon_encoder->encoder_id);
break;
}
WREG32(HDMI0_CONTROL + offset, hdmi);
}
#if 0
if (rdev->irq.installed
&& rdev->family != CHIP_RS600
&& rdev->family != CHIP_RS690
&& rdev->family != CHIP_RS740) {
 
if (rdev->irq.installed) {
/* if irq is available use it */
rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
radeon_irq_set(rdev);
// radeon_irq_kms_enable_afmt(rdev, dig->afmt->id);
}
 
r600_audio_disable_polling(encoder);
} else {
/* if not, fall back to polling */
r600_audio_enable_polling(encoder);
}
#endif
dig->afmt->enabled = true;
 
DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
offset, radeon_encoder->encoder_id);
}
 
/*
542,38 → 538,50
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
uint32_t offset;
 
if (ASIC_IS_DCE4(rdev))
if (ASIC_IS_DCE6(rdev))
return;
 
offset = radeon_encoder->hdmi_offset;
if (!offset) {
dev_err(rdev->dev, "Disabling not enabled HDMI\n");
/* Called for ATOM_ENCODER_MODE_HDMI only */
if (!dig || !dig->afmt) {
WARN_ON(1);
return;
}
if (!dig->afmt->enabled)
return;
offset = dig->afmt->offset;
 
DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
offset, radeon_encoder->encoder_id);
 
if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
/* disable irq */
// radeon_irq_kms_disable_afmt(rdev, dig->afmt->id);
 
/* Older chipsets not handled by AtomBIOS */
if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0);
WREG32_P(AVIVO_TMDSA_CNTL, 0,
~AVIVO_TMDSA_CNTL_HDMI_EN);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
WREG32(offset + R600_HDMI_ENABLE, 0);
WREG32_P(AVIVO_LVTMA_CNTL, 0,
~AVIVO_LVTMA_CNTL_HDMI_EN);
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
WREG32_P(DDIA_CNTL, 0, ~DDIA_HDMI_EN);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
break;
default:
dev_err(rdev->dev, "Unknown HDMI output type\n");
dev_err(rdev->dev, "Invalid encoder for HDMI: 0x%X\n",
radeon_encoder->encoder_id);
break;
}
WREG32(HDMI0_CONTROL + offset, HDMI0_ERROR_ACK);
}
 
radeon_encoder->hdmi_offset = 0;
radeon_encoder->hdmi_config_offset = 0;
dig->afmt->enabled = false;
}
/drivers/video/drm/radeon/r600_reg.h
156,45 → 156,10
#define R600_AUDIO_PIN_WIDGET_CNTL 0x73d4
#define R600_AUDIO_STATUS_BITS 0x73d8
 
/* HDMI base register addresses */
#define R600_HDMI_BLOCK1 0x7400
#define R600_HDMI_BLOCK2 0x7700
#define R600_HDMI_BLOCK3 0x7800
#define DCE2_HDMI_OFFSET0 (0x7400 - 0x7400)
#define DCE2_HDMI_OFFSET1 (0x7700 - 0x7400)
/* DCE3.2 second instance starts at 0x7800 */
#define DCE3_HDMI_OFFSET0 (0x7400 - 0x7400)
#define DCE3_HDMI_OFFSET1 (0x7800 - 0x7400)
 
/* HDMI registers */
#define R600_HDMI_ENABLE 0x00
#define R600_HDMI_STATUS 0x04
# define R600_HDMI_INT_PENDING (1 << 29)
#define R600_HDMI_CNTL 0x08
# define R600_HDMI_INT_EN (1 << 28)
# define R600_HDMI_INT_ACK (1 << 29)
#define R600_HDMI_UNKNOWN_0 0x0C
#define R600_HDMI_AUDIOCNTL 0x10
#define R600_HDMI_VIDEOCNTL 0x14
#define R600_HDMI_VERSION 0x18
#define R600_HDMI_UNKNOWN_1 0x28
#define R600_HDMI_VIDEOINFOFRAME_0 0x54
#define R600_HDMI_VIDEOINFOFRAME_1 0x58
#define R600_HDMI_VIDEOINFOFRAME_2 0x5c
#define R600_HDMI_VIDEOINFOFRAME_3 0x60
#define R600_HDMI_32kHz_CTS 0xac
#define R600_HDMI_32kHz_N 0xb0
#define R600_HDMI_44_1kHz_CTS 0xb4
#define R600_HDMI_44_1kHz_N 0xb8
#define R600_HDMI_48kHz_CTS 0xbc
#define R600_HDMI_48kHz_N 0xc0
#define R600_HDMI_AUDIOINFOFRAME_0 0xcc
#define R600_HDMI_AUDIOINFOFRAME_1 0xd0
#define R600_HDMI_IEC60958_1 0xd4
#define R600_HDMI_IEC60958_2 0xd8
#define R600_HDMI_UNKNOWN_2 0xdc
#define R600_HDMI_AUDIO_DEBUG_0 0xe0
#define R600_HDMI_AUDIO_DEBUG_1 0xe4
#define R600_HDMI_AUDIO_DEBUG_2 0xe8
#define R600_HDMI_AUDIO_DEBUG_3 0xec
 
/* HDMI additional config base register addresses */
#define R600_HDMI_CONFIG1 0x7600
#define R600_HDMI_CONFIG2 0x7a00
 
#endif
/drivers/video/drm/radeon/r600_video.c
709,7 → 709,7
 
mutex_lock(&rdev->r600_video.mutex);
rdev->r600_video.vb_ib = NULL;
r = r600_video_prepare_copy(rdev, h*pitch);
r = r600_video_prepare_copy(rdev, w*4);
if (r) {
// if (rdev->r600_blit.vb_ib)
// radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
/drivers/video/drm/radeon/r600d.h
66,6 → 66,14
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
 
#define R_028808_CB_COLOR_CONTROL 0x28808
#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4)
#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7)
#define C_028808_SPECIAL_OP 0xFFFFFF8F
#define V_028808_SPECIAL_NORMAL 0x00
#define V_028808_SPECIAL_DISABLE 0x01
#define V_028808_SPECIAL_RESOLVE_BOX 0x07
 
#define CB_COLOR0_BASE 0x28040
#define CB_COLOR1_BASE 0x28044
#define CB_COLOR2_BASE 0x28048
78,7 → 86,40
 
#define CB_COLOR0_SIZE 0x28060
#define CB_COLOR0_VIEW 0x28080
#define R_028080_CB_COLOR0_VIEW 0x028080
#define S_028080_SLICE_START(x) (((x) & 0x7FF) << 0)
#define G_028080_SLICE_START(x) (((x) >> 0) & 0x7FF)
#define C_028080_SLICE_START 0xFFFFF800
#define S_028080_SLICE_MAX(x) (((x) & 0x7FF) << 13)
#define G_028080_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
#define C_028080_SLICE_MAX 0xFF001FFF
#define R_028084_CB_COLOR1_VIEW 0x028084
#define R_028088_CB_COLOR2_VIEW 0x028088
#define R_02808C_CB_COLOR3_VIEW 0x02808C
#define R_028090_CB_COLOR4_VIEW 0x028090
#define R_028094_CB_COLOR5_VIEW 0x028094
#define R_028098_CB_COLOR6_VIEW 0x028098
#define R_02809C_CB_COLOR7_VIEW 0x02809C
#define R_028100_CB_COLOR0_MASK 0x028100
#define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0)
#define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF)
#define C_028100_CMASK_BLOCK_MAX 0xFFFFF000
#define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12)
#define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF)
#define C_028100_FMASK_TILE_MAX 0x00000FFF
#define R_028104_CB_COLOR1_MASK 0x028104
#define R_028108_CB_COLOR2_MASK 0x028108
#define R_02810C_CB_COLOR3_MASK 0x02810C
#define R_028110_CB_COLOR4_MASK 0x028110
#define R_028114_CB_COLOR5_MASK 0x028114
#define R_028118_CB_COLOR6_MASK 0x028118
#define R_02811C_CB_COLOR7_MASK 0x02811C
#define CB_COLOR0_INFO 0x280a0
# define CB_FORMAT(x) ((x) << 2)
# define CB_ARRAY_MODE(x) ((x) << 8)
# define CB_SOURCE_FORMAT(x) ((x) << 27)
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_TILE 0x280c0
#define CB_COLOR0_FRAG 0x280e0
#define CB_COLOR0_MASK 0x28100
134,6 → 175,9
 
#define CONFIG_MEMSIZE 0x5428
#define CONFIG_CNTL 0x5424
#define CP_STALLED_STAT1 0x8674
#define CP_STALLED_STAT2 0x8678
#define CP_BUSY_STAT 0x867C
#define CP_STAT 0x8680
#define CP_COHER_BASE 0x85F8
#define CP_DEBUG 0xC1FC
176,6 → 220,14
#define PREZ_MUST_WAIT_FOR_POSTZ_DONE (1 << 31)
#define DB_DEPTH_BASE 0x2800C
#define DB_HTILE_DATA_BASE 0x28014
#define DB_HTILE_SURFACE 0x28D24
#define S_028D24_HTILE_WIDTH(x) (((x) & 0x1) << 0)
#define G_028D24_HTILE_WIDTH(x) (((x) >> 0) & 0x1)
#define C_028D24_HTILE_WIDTH 0xFFFFFFFE
#define S_028D24_HTILE_HEIGHT(x) (((x) & 0x1) << 1)
#define G_028D24_HTILE_HEIGHT(x) (((x) >> 1) & 0x1)
#define C_028D24_HTILE_HEIGHT 0xFFFFFFFD
#define G_028D24_LINEAR(x) (((x) >> 2) & 0x1)
#define DB_WATERMARKS 0x9838
#define DEPTH_FREE(x) ((x) << 0)
#define DEPTH_FLUSH(x) ((x) << 5)
192,6 → 244,8
#define BACKEND_MAP(x) ((x) << 16)
 
#define GB_TILING_CONFIG 0x98F0
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
 
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
417,6 → 471,17
#define SQ_PGM_START_VS 0x28858
#define SQ_PGM_RESOURCES_VS 0x28868
#define SQ_PGM_CF_OFFSET_VS 0x288d0
 
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
#define SQ_VTX_CONSTANT_WORD2_0 0x30008
# define SQ_VTXC_BASE_ADDR_HI(x) ((x) << 0)
# define SQ_VTXC_STRIDE(x) ((x) << 8)
# define SQ_VTXC_ENDIAN_SWAP(x) ((x) << 30)
# define SQ_ENDIAN_NONE 0
# define SQ_ENDIAN_8IN16 1
# define SQ_ENDIAN_8IN32 2
#define SQ_VTX_CONSTANT_WORD3_0 0x3000c
#define SQ_VTX_CONSTANT_WORD6_0 0x38018
#define S__SQ_VTX_CONSTANT_TYPE(x) (((x) & 3) << 30)
#define G__SQ_VTX_CONSTANT_TYPE(x) (((x) >> 30) & 3)
445,6 → 510,7
#define TC_L2_SIZE(x) ((x)<<5)
#define L2_DISABLE_LATE_HIT (1<<9)
 
#define VC_ENHANCE 0x9714
 
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
477,6 → 543,11
#define VGT_STRMOUT_BUFFER_OFFSET_1 0x28AEC
#define VGT_STRMOUT_BUFFER_OFFSET_2 0x28AFC
#define VGT_STRMOUT_BUFFER_OFFSET_3 0x28B0C
#define VGT_STRMOUT_BUFFER_SIZE_0 0x28AD0
#define VGT_STRMOUT_BUFFER_SIZE_1 0x28AE0
#define VGT_STRMOUT_BUFFER_SIZE_2 0x28AF0
#define VGT_STRMOUT_BUFFER_SIZE_3 0x28B00
 
#define VGT_STRMOUT_EN 0x28AB0
#define VGT_VERTEX_REUSE_BLOCK_CNTL 0x28C58
#define VTX_REUSE_DEPTH_MASK 0x000000FF
553,11 → 624,18
#define RLC_HB_WPTR 0x3f1c
#define RLC_HB_WPTR_LSB_ADDR 0x3f14
#define RLC_HB_WPTR_MSB_ADDR 0x3f18
#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38
#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40
#define RLC_MC_CNTL 0x3f44
#define RLC_UCODE_CNTL 0x3f48
#define RLC_UCODE_ADDR 0x3f2c
#define RLC_UCODE_DATA 0x3f30
 
/* new for TN */
#define TN_RLC_SAVE_AND_RESTORE_BASE 0x3f10
#define TN_RLC_CLEAR_STATE_RESTORE_BASE 0x3f20
 
#define SRBM_SOFT_RESET 0xe60
# define SOFT_RESET_RLC (1 << 13)
 
777,6 → 855,239
# define TARGET_LINK_SPEED_MASK (0xf << 0)
# define SELECTABLE_DEEMPHASIS (1 << 6)
 
/* Audio clocks */
#define DCCG_AUDIO_DTO0_PHASE 0x0514
#define DCCG_AUDIO_DTO0_MODULE 0x0518
#define DCCG_AUDIO_DTO0_LOAD 0x051c
# define DTO_LOAD (1 << 31)
#define DCCG_AUDIO_DTO0_CNTL 0x0520
 
#define DCCG_AUDIO_DTO1_PHASE 0x0524
#define DCCG_AUDIO_DTO1_MODULE 0x0528
#define DCCG_AUDIO_DTO1_LOAD 0x052c
#define DCCG_AUDIO_DTO1_CNTL 0x0530
 
#define DCCG_AUDIO_DTO_SELECT 0x0534
 
/* digital blocks */
#define TMDSA_CNTL 0x7880
# define TMDSA_HDMI_EN (1 << 2)
#define LVTMA_CNTL 0x7a80
# define LVTMA_HDMI_EN (1 << 2)
#define DDIA_CNTL 0x7200
# define DDIA_HDMI_EN (1 << 2)
#define DIG0_CNTL 0x75a0
# define DIG_MODE(x) (((x) & 7) << 8)
# define DIG_MODE_DP 0
# define DIG_MODE_LVDS 1
# define DIG_MODE_TMDS_DVI 2
# define DIG_MODE_TMDS_HDMI 3
# define DIG_MODE_SDVO 4
#define DIG1_CNTL 0x79a0
 
/* rs6xx/rs740 and r6xx share the same HDMI blocks, however, rs6xx has only one
* instance of the blocks while r6xx has 2. DCE 3.0 cards are slightly
* different due to the new DIG blocks, but also have 2 instances.
* DCE 3.0 HDMI blocks are part of each DIG encoder.
*/
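
/*
 * A minimal sketch of how the instance layout above maps to a register
 * offset, using the DCE2_/DCE3_HDMI_OFFSET* defines from r600_reg.h; this
 * helper is hypothetical and for illustration only.
 */
static inline u32 r600_hdmi_instance_offset(bool dce3, int instance)
{
	if (instance == 0)
		return DCE2_HDMI_OFFSET0;	/* 0x7400 on both generations */
	/* second instance: 0x7700 on r6xx, 0x7800 on DCE3 */
	return dce3 ? DCE3_HDMI_OFFSET1 : DCE2_HDMI_OFFSET1;
}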
 
/* rs6xx/rs740/r6xx/dce3 */
#define HDMI0_CONTROL 0x7400
/* rs6xx/rs740/r6xx */
# define HDMI0_ENABLE (1 << 0)
# define HDMI0_STREAM(x) (((x) & 3) << 2)
# define HDMI0_STREAM_TMDSA 0
# define HDMI0_STREAM_LVTMA 1
# define HDMI0_STREAM_DVOA 2
# define HDMI0_STREAM_DDIA 3
/* rs6xx/r6xx/dce3 */
# define HDMI0_ERROR_ACK (1 << 8)
# define HDMI0_ERROR_MASK (1 << 9)
#define HDMI0_STATUS 0x7404
# define HDMI0_ACTIVE_AVMUTE (1 << 0)
# define HDMI0_AUDIO_ENABLE (1 << 4)
# define HDMI0_AZ_FORMAT_WTRIG (1 << 28)
# define HDMI0_AZ_FORMAT_WTRIG_INT (1 << 29)
#define HDMI0_AUDIO_PACKET_CONTROL 0x7408
# define HDMI0_AUDIO_SAMPLE_SEND (1 << 0)
# define HDMI0_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
# define HDMI0_AUDIO_SEND_MAX_PACKETS (1 << 8)
# define HDMI0_AUDIO_TEST_EN (1 << 12)
# define HDMI0_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
# define HDMI0_AUDIO_CHANNEL_SWAP (1 << 24)
# define HDMI0_60958_CS_UPDATE (1 << 26)
# define HDMI0_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define HDMI0_AZ_FORMAT_WTRIG_ACK (1 << 29)
#define HDMI0_AUDIO_CRC_CONTROL 0x740c
# define HDMI0_AUDIO_CRC_EN (1 << 0)
#define HDMI0_VBI_PACKET_CONTROL 0x7410
# define HDMI0_NULL_SEND (1 << 0)
# define HDMI0_GC_SEND (1 << 4)
# define HDMI0_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
#define HDMI0_INFOFRAME_CONTROL0 0x7414
# define HDMI0_AVI_INFO_SEND (1 << 0)
# define HDMI0_AVI_INFO_CONT (1 << 1)
# define HDMI0_AUDIO_INFO_SEND (1 << 4)
# define HDMI0_AUDIO_INFO_CONT (1 << 5)
# define HDMI0_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hdmi regs */
# define HDMI0_AUDIO_INFO_UPDATE (1 << 7)
# define HDMI0_MPEG_INFO_SEND (1 << 8)
# define HDMI0_MPEG_INFO_CONT (1 << 9)
# define HDMI0_MPEG_INFO_UPDATE (1 << 10)
#define HDMI0_INFOFRAME_CONTROL1 0x7418
# define HDMI0_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
# define HDMI0_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI0_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI0_GENERIC_PACKET_CONTROL 0x741c
# define HDMI0_GENERIC0_SEND (1 << 0)
# define HDMI0_GENERIC0_CONT (1 << 1)
# define HDMI0_GENERIC0_UPDATE (1 << 2)
# define HDMI0_GENERIC1_SEND (1 << 4)
# define HDMI0_GENERIC1_CONT (1 << 5)
# define HDMI0_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
# define HDMI0_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
#define HDMI0_GC 0x7428
# define HDMI0_GC_AVMUTE (1 << 0)
#define HDMI0_AVI_INFO0 0x7454
# define HDMI0_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define HDMI0_AVI_INFO_S(x) (((x) & 3) << 8)
# define HDMI0_AVI_INFO_B(x) (((x) & 3) << 10)
# define HDMI0_AVI_INFO_A(x) (((x) & 1) << 12)
# define HDMI0_AVI_INFO_Y(x) (((x) & 3) << 13)
# define HDMI0_AVI_INFO_Y_RGB 0
# define HDMI0_AVI_INFO_Y_YCBCR422 1
# define HDMI0_AVI_INFO_Y_YCBCR444 2
# define HDMI0_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
# define HDMI0_AVI_INFO_R(x) (((x) & 0xf) << 16)
# define HDMI0_AVI_INFO_M(x) (((x) & 0x3) << 20)
# define HDMI0_AVI_INFO_C(x) (((x) & 0x3) << 22)
# define HDMI0_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
# define HDMI0_AVI_INFO_SC(x) (((x) & 0x3) << 24)
# define HDMI0_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
#define HDMI0_AVI_INFO1 0x7458
# define HDMI0_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
# define HDMI0_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
# define HDMI0_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
#define HDMI0_AVI_INFO2 0x745c
# define HDMI0_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
# define HDMI0_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
#define HDMI0_AVI_INFO3 0x7460
# define HDMI0_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
# define HDMI0_AVI_INFO_VERSION(x) (((x) & 3) << 24)
#define HDMI0_MPEG_INFO0 0x7464
# define HDMI0_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define HDMI0_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
# define HDMI0_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
# define HDMI0_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
#define HDMI0_MPEG_INFO1 0x7468
# define HDMI0_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
# define HDMI0_MPEG_INFO_MF(x) (((x) & 3) << 8)
# define HDMI0_MPEG_INFO_FR(x) (((x) & 1) << 12)
#define HDMI0_GENERIC0_HDR 0x746c
#define HDMI0_GENERIC0_0 0x7470
#define HDMI0_GENERIC0_1 0x7474
#define HDMI0_GENERIC0_2 0x7478
#define HDMI0_GENERIC0_3 0x747c
#define HDMI0_GENERIC0_4 0x7480
#define HDMI0_GENERIC0_5 0x7484
#define HDMI0_GENERIC0_6 0x7488
#define HDMI0_GENERIC1_HDR 0x748c
#define HDMI0_GENERIC1_0 0x7490
#define HDMI0_GENERIC1_1 0x7494
#define HDMI0_GENERIC1_2 0x7498
#define HDMI0_GENERIC1_3 0x749c
#define HDMI0_GENERIC1_4 0x74a0
#define HDMI0_GENERIC1_5 0x74a4
#define HDMI0_GENERIC1_6 0x74a8
#define HDMI0_ACR_32_0 0x74ac
# define HDMI0_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
#define HDMI0_ACR_32_1 0x74b0
# define HDMI0_ACR_N_32(x) (((x) & 0xfffff) << 0)
#define HDMI0_ACR_44_0 0x74b4
# define HDMI0_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
#define HDMI0_ACR_44_1 0x74b8
# define HDMI0_ACR_N_44(x) (((x) & 0xfffff) << 0)
#define HDMI0_ACR_48_0 0x74bc
# define HDMI0_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
#define HDMI0_ACR_48_1 0x74c0
# define HDMI0_ACR_N_48(x) (((x) & 0xfffff) << 0)
#define HDMI0_ACR_STATUS_0 0x74c4
#define HDMI0_ACR_STATUS_1 0x74c8
#define HDMI0_AUDIO_INFO0 0x74cc
# define HDMI0_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define HDMI0_AUDIO_INFO_CC(x) (((x) & 7) << 8)
#define HDMI0_AUDIO_INFO1 0x74d0
# define HDMI0_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
# define HDMI0_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
# define HDMI0_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
# define HDMI0_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
#define HDMI0_60958_0 0x74d4
# define HDMI0_60958_CS_A(x) (((x) & 1) << 0)
# define HDMI0_60958_CS_B(x) (((x) & 1) << 1)
# define HDMI0_60958_CS_C(x) (((x) & 1) << 2)
# define HDMI0_60958_CS_D(x) (((x) & 3) << 3)
# define HDMI0_60958_CS_MODE(x) (((x) & 3) << 6)
# define HDMI0_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define HDMI0_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define HDMI0_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
# define HDMI0_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define HDMI0_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
#define HDMI0_60958_1 0x74d8
# define HDMI0_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define HDMI0_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define HDMI0_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define HDMI0_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define HDMI0_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
#define HDMI0_ACR_PACKET_CONTROL 0x74dc
# define HDMI0_ACR_SEND (1 << 0)
# define HDMI0_ACR_CONT (1 << 1)
# define HDMI0_ACR_SELECT(x) (((x) & 3) << 4)
# define HDMI0_ACR_HW 0
# define HDMI0_ACR_32 1
# define HDMI0_ACR_44 2
# define HDMI0_ACR_48 3
# define HDMI0_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI0_ACR_AUTO_SEND (1 << 12)
#define HDMI0_RAMP_CONTROL0 0x74e0
# define HDMI0_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL1 0x74e4
# define HDMI0_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL2 0x74e8
# define HDMI0_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
#define HDMI0_RAMP_CONTROL3 0x74ec
# define HDMI0_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
/* HDMI0_60958_2 is r7xx only */
#define HDMI0_60958_2 0x74f0
# define HDMI0_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
# define HDMI0_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
# define HDMI0_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
# define HDMI0_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
# define HDMI0_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
# define HDMI0_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
/* r6xx only; second instance starts at 0x7700 */
#define HDMI1_CONTROL 0x7700
#define HDMI1_STATUS 0x7704
#define HDMI1_AUDIO_PACKET_CONTROL 0x7708
/* DCE3; second instance starts at 0x7800 NOT 0x7700 */
#define DCE3_HDMI1_CONTROL 0x7800
#define DCE3_HDMI1_STATUS 0x7804
#define DCE3_HDMI1_AUDIO_PACKET_CONTROL 0x7808
/* DCE3.2 (for interrupts) */
#define AFMT_STATUS 0x7600
# define AFMT_AUDIO_ENABLE (1 << 4)
# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
#define AFMT_AUDIO_PACKET_CONTROL 0x7604
# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
# define AFMT_AUDIO_TEST_EN (1 << 12)
# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
# define AFMT_60958_CS_UPDATE (1 << 26)
# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
 
/*
* PM4
*/
815,7 → 1126,11
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_INDIRECT_BUFFER_MP 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
# define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
877,6 → 1192,7
#define PACKET3_SET_CTL_CONST 0x6F
#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
#define PACKET3_SET_CTL_CONST_END 0x0003e200
#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
#define PACKET3_SURFACE_BASE_UPDATE 0x73
 
 
1106,6 → 1422,9
#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
#define C_0280A0_TILE_MODE 0xFFF3FFFF
#define V_0280A0_TILE_DISABLE 0
#define V_0280A0_CLEAR_ENABLE 1
#define V_0280A0_FRAG_ENABLE 2
#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
1352,6 → 1671,12
#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
#define C_038010_DST_SEL_W 0xF1FFFFFF
# define SQ_SEL_X 0
# define SQ_SEL_Y 1
# define SQ_SEL_Z 2
# define SQ_SEL_W 3
# define SQ_SEL_0 4
# define SQ_SEL_1 5
#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
#define C_038010_BASE_LEVEL 0x0FFFFFFF
/drivers/video/drm/radeon/radeon.h
61,9 → 61,10
*/
 
#include <asm/atomic.h>
 
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/div64.h>
 
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
74,7 → 75,6
#include <pci.h>
 
#include <errno-base.h>
#include "drm_edid.h"
 
#include "radeon_family.h"
#include "radeon_mode.h"
82,8 → 82,6
 
#include <syscall.h>
 
extern unsigned long volatile jiffies;
 
/*
* Modules parameters.
*/
102,6 → 100,11
extern int radeon_disp_priority;
extern int radeon_hw_i2c;
extern int radeon_pcie_gen2;
extern int radeon_msi;
extern int radeon_lockup_timeout;
 
 
 
typedef struct pm_message {
int event;
} pm_message_t;
114,53 → 117,8
int freq;
}videomode_t;
 
static inline uint8_t __raw_readb(const volatile void __iomem *addr)
{
return *(const volatile uint8_t __force *) addr;
}
 
static inline uint16_t __raw_readw(const volatile void __iomem *addr)
{
return *(const volatile uint16_t __force *) addr;
}
 
static inline uint32_t __raw_readl(const volatile void __iomem *addr)
{
return *(const volatile uint32_t __force *) addr;
}
 
#define readb __raw_readb
#define readw __raw_readw
#define readl __raw_readl
 
 
 
static inline void __raw_writeb(uint8_t b, volatile void __iomem *addr)
{
*(volatile uint8_t __force *) addr = b;
}
 
static inline void __raw_writew(uint16_t b, volatile void __iomem *addr)
{
*(volatile uint16_t __force *) addr = b;
}
 
static inline void __raw_writel(uint32_t b, volatile void __iomem *addr)
{
*(volatile uint32_t __force *) addr = b;
}
 
static inline void __raw_writeq(__u64 b, volatile void __iomem *addr)
{
*(volatile __u64 *)addr = b;
}
 
#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel
#define writeq __raw_writeq
 
 
static inline u32 ioread32(const volatile void __iomem *addr)
{
return in32((u32)addr);
171,11 → 129,11
out32((u32)addr, b);
}
 
struct __wait_queue_head {
spinlock_t lock;
struct list_head task_list;
};
typedef struct __wait_queue_head wait_queue_head_t;
//struct __wait_queue_head {
// spinlock_t lock;
// struct list_head task_list;
//};
//typedef struct __wait_queue_head wait_queue_head_t;
 
 
/*
186,10 → 144,29
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* RADEON_IB_POOL_SIZE must be a power of 2 */
#define RADEON_IB_POOL_SIZE 16
#define RADEON_DEBUGFS_MAX_NUM_FILES 32
#define RADEON_DEBUGFS_MAX_COMPONENTS 32
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
 
/* max number of rings */
#define RADEON_NUM_RINGS 3
 
/* fence seqs are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
 
/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX 0
 
/* cayman has 2 compute CP rings */
#define CAYMAN_RING_TYPE_CP1_INDEX 1
#define CAYMAN_RING_TYPE_CP2_INDEX 2
 
/* hardcode these limits for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
#define RADEON_IB_VM_MAX_SIZE (64 << 10)
 
/*
* Errata workarounds.
*/
206,24 → 183,8
/*
* BIOS.
*/
#define ATRM_BIOS_PAGE 4096
 
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_atrm_supported(struct pci_dev *pdev);
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
#else
static inline bool radeon_atrm_supported(struct pci_dev *pdev)
{
return false;
}
 
static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){
return -EINVAL;
}
#endif
bool radeon_get_bios(struct radeon_device *rdev);
 
 
/*
* Dummy page
*/
263,12 → 224,15
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type);
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage);
void rs690_pm_info(struct radeon_device *rdev);
extern int rv6xx_get_temp(struct radeon_device *rdev);
extern int rv770_get_temp(struct radeon_device *rdev);
extern int evergreen_get_temp(struct radeon_device *rdev);
extern int sumo_get_temp(struct radeon_device *rdev);
extern int si_get_temp(struct radeon_device *rdev);
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw,
unsigned *bankh, unsigned *mtaspect,
unsigned *tile_split);
 
/*
* Fences.
275,15 → 239,12
*/
struct radeon_fence_driver {
uint32_t scratch_reg;
atomic_t seq;
uint32_t last_seq;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
rwlock_t lock;
struct list_head created;
struct list_head emited;
struct list_head signaled;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
unsigned long last_activity;
bool initialized;
};
 
290,26 → 251,65
struct radeon_fence {
struct radeon_device *rdev;
struct kref kref;
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
bool emited;
bool signaled;
evhandle_t evnt;
uint64_t seq;
/* RB, DMA, etc. */
unsigned ring;
};
 
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev);
int radeon_fence_wait_last(struct radeon_device *rdev);
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring);
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring);
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
bool radeon_fence_need_sync(struct radeon_fence *fence, int ring);
void radeon_fence_note_sync(struct radeon_fence *fence, int ring);
static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a,
struct radeon_fence *b)
{
if (!a) {
return b;
}
 
if (!b) {
return a;
}
 
BUG_ON(a->ring != b->ring);
 
if (a->seq > b->seq) {
return a;
} else {
return b;
}
}
 
static inline bool radeon_fence_is_earlier(struct radeon_fence *a,
struct radeon_fence *b)
{
if (!a) {
return false;
}
 
if (!b) {
return true;
}
 
BUG_ON(a->ring != b->ring);
 
return a->seq < b->seq;
}
 
/*
* Tiling registers
*/
330,6 → 330,24
bool initialized;
};
 
/* bo virtual address in a specific vm */
struct radeon_bo_va {
/* protected by bo being reserved */
struct list_head bo_list;
uint64_t soffset;
uint64_t eoffset;
uint32_t flags;
bool valid;
unsigned ref_count;
 
/* protected by vm mutex */
struct list_head vm_list;
 
/* constant after initialization */
struct radeon_vm *vm;
struct radeon_bo *bo;
};
 
struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
345,10 → 363,15
u32 tiling_flags;
u32 pitch;
int surface_reg;
/* list of all virtual address to which this bo
* is associated to
*/
struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
u32 domain;
int vmapping_count;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
360,6 → 383,53
u32 tiling_flags;
};
 
/* sub-allocation manager, it has to be protected by another lock.
* By conception this is a helper for other parts of the driver
* like the indirect buffer or semaphore, which both have their own
* locking.
*
* The principle is simple: we keep a list of sub-allocations in offset
* order (the first entry has offset == 0, the last entry has the
* highest offset).
*
* When allocating a new object we first check if there is room at
* the end, i.e. total_size - (last_object_offset + last_object_size) >=
* alloc_size. If so, we allocate the new object there.
*
* When there is not enough room at the end, we start waiting for
* each sub object until we reach object_offset + object_size >=
* alloc_size; that object then becomes the sub object we return.
* (The end-of-buffer test is sketched in the helper after the
* struct below.)
*
* Alignment can't be bigger than page size.
*
* Holes are not considered for allocation to keep things simple.
* The assumption is that there won't be holes (all objects use the
* same alignment).
*/
struct radeon_sa_manager {
wait_queue_head_t wq;
struct radeon_bo *bo;
struct list_head *hole;
struct list_head flist[RADEON_NUM_RINGS];
struct list_head olist;
unsigned size;
uint64_t gpu_addr;
void *cpu_ptr;
uint32_t domain;
};
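
/*
 * A minimal sketch of the "room at the end" test described above, assuming
 * last_eoffset is the eoffset of the last entry on olist; this helper is
 * hypothetical and for illustration only, not part of the driver.
 */
static inline bool radeon_sa_fits_at_end(struct radeon_sa_manager *sa_manager,
					 unsigned last_eoffset, unsigned size)
{
	/* total_size - (last_object_offset + last_object_size) >= alloc_size */
	return (sa_manager->size - last_eoffset) >= size;
}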
 
struct radeon_sa_bo;
 
/* sub-allocation buffer */
struct radeon_sa_bo {
struct list_head olist;
struct list_head flist;
struct radeon_sa_manager *manager;
unsigned soffset;
unsigned eoffset;
struct radeon_fence *fence;
};
 
/*
* GEM objects.
*/
374,9 → 444,6
int alignment, int initial_domain,
bool discardable, bool kernel,
struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);
 
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
389,36 → 456,47
uint32_t handle);
 
/*
* GART structures, functions & helpers
* Semaphores.
*/
struct radeon_mc;
 
struct radeon_gart_table_ram {
volatile uint32_t *ptr;
/* everything here is constant */
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
};
 
struct radeon_gart_table_vram {
struct radeon_bo *robj;
volatile uint32_t *ptr;
};
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore);
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int signaler, int waiter);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
 
union radeon_gart_table {
struct radeon_gart_table_ram ram;
struct radeon_gart_table_vram vram;
};
/*
* GART structures, functions & helpers
*/
struct radeon_mc;
 
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
 
struct radeon_gart {
dma_addr_t table_addr;
struct radeon_bo *robj;
void *ptr;
unsigned num_gpu_pages;
unsigned num_cpu_pages;
unsigned table_size;
union radeon_gart_table table;
struct page **pages;
dma_addr_t *pages_addr;
bool *ttm_alloced;
bool ready;
};
 
426,12 → 504,16
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_pin(struct radeon_device *rdev);
void radeon_gart_table_vram_unpin(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, u32_t *pagelist);
int pages, u32 *pagelist,
dma_addr_t *dma_addr);
void radeon_gart_restore(struct radeon_device *rdev);
 
 
/*
480,6 → 562,7
*/
struct r500_irq_stat_regs {
u32 disp_int;
u32 hdmi0_status;
};
 
struct r600_irq_stat_regs {
488,6 → 571,8
u32 disp_int_cont2;
u32 d1grph_int;
u32 d2grph_int;
u32 hdmi0_status;
u32 hdmi1_status;
};
 
struct evergreen_irq_stat_regs {
503,6 → 588,12
u32 d4grph_int;
u32 d5grph_int;
u32 d6grph_int;
u32 afmt_status1;
u32 afmt_status2;
u32 afmt_status3;
u32 afmt_status4;
u32 afmt_status5;
u32 afmt_status6;
};
 
union radeon_irq_stat_regs {
511,77 → 602,133
struct evergreen_irq_stat_regs evergreen;
};
 
#define RADEON_MAX_HPD_PINS 6
#define RADEON_MAX_CRTCS 6
#define RADEON_MAX_AFMT_BLOCKS 6
 
struct radeon_irq {
bool installed;
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[6];
bool pflip[6];
spinlock_t lock;
atomic_t ring_int[RADEON_NUM_RINGS];
bool crtc_vblank_int[RADEON_MAX_CRTCS];
atomic_t pflip[RADEON_MAX_CRTCS];
wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
bool gui_idle;
bool gui_idle_acked;
wait_queue_head_t idle_queue;
/* FIXME: use defines for max HDMI blocks */
bool hdmi[2];
spinlock_t sw_lock;
int sw_refcount;
bool hpd[RADEON_MAX_HPD_PINS];
bool afmt[RADEON_MAX_AFMT_BLOCKS];
union radeon_irq_stat_regs stat_regs;
spinlock_t pflip_lock[6];
int pflip_refcount[6];
};
 
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block);
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask);
 
/*
* CP & ring.
* CP & rings.
*/
 
struct radeon_ib {
struct list_head list;
unsigned idx;
struct radeon_sa_bo *sa_bo;
uint32_t length_dw;
uint64_t gpu_addr;
uint32_t *ptr;
int ring;
struct radeon_fence *fence;
uint32_t *ptr;
uint32_t length_dw;
bool free;
struct radeon_vm *vm;
bool is_const_ib;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_semaphore *semaphore;
};
 
/*
* locking -
* mutex protects scheduled_ibs, ready, alloc_bm
*/
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
struct list_head bogus_ib;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
unsigned head_id;
};
 
struct radeon_cp {
struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned rptr_offs;
unsigned rptr_reg;
unsigned rptr_save_reg;
u64 next_rptr_gpu_addr;
volatile u32 *next_rptr_cpu_addr;
unsigned wptr;
unsigned wptr_old;
unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
unsigned long last_activity;
unsigned last_rptr;
uint64_t gpu_addr;
uint32_t align_mask;
uint32_t ptr_mask;
struct mutex mutex;
bool ready;
u32 ptr_reg_shift;
u32 ptr_reg_mask;
u32 nop;
u32 idx;
};
 
/*
* VM
*/
 
/* maximum number of VMIDs */
#define RADEON_NUM_VM 16
 
/* defines the number of bits in the page table versus the page directory;
* a page is 4KB, so we have a 12-bit offset, 9 bits in the page
* table, and the remaining 19 bits in the page directory */
#define RADEON_VM_BLOCK_SIZE 9
 
/* number of entries in page table */
#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE)
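
/*
 * A sketch of how a GPU virtual address splits under this layout (12-bit
 * page offset, 9-bit page table index, remaining bits indexing the page
 * directory); this helper is hypothetical and for illustration only.
 */
static inline void radeon_vm_split_va(uint64_t va, uint64_t *pd_idx,
				      uint64_t *pt_idx, uint64_t *offset)
{
	*offset = va & 0xfff;					/* low 12 bits */
	*pt_idx = (va >> 12) & (RADEON_VM_PTE_COUNT - 1);	/* next 9 bits */
	*pd_idx = va >> (12 + RADEON_VM_BLOCK_SIZE);		/* remaining bits */
}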
 
struct radeon_vm {
struct list_head list;
struct list_head va;
unsigned id;
 
/* contains the page directory */
struct radeon_sa_bo *page_directory;
uint64_t pd_gpu_addr;
 
/* array of page tables, one for each page directory entry */
struct radeon_sa_bo **page_tables;
 
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
/* last flush or NULL if we still need to flush */
struct radeon_fence *last_flush;
};
 
struct radeon_vm_manager {
struct mutex lock;
struct list_head lru_vm;
struct radeon_fence *active[RADEON_NUM_VM];
struct radeon_sa_manager sa_manager;
uint32_t max_pfn;
/* number of VMIDs */
unsigned nvm;
/* vram base address for page table entry */
u64 vram_base_offset;
/* is vm enabled? */
bool enabled;
};
 
/*
* file private structure
*/
struct radeon_fpriv {
struct radeon_vm vm;
};
 
/*
* R6xx+ IH ring
*/
struct r600_ih {
588,43 → 735,85
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
uint64_t gpu_addr;
uint32_t ptr_mask;
spinlock_t lock;
atomic_t lock;
bool enabled;
};
 
struct r600_blit_cp_primitives {
void (*set_render_target)(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr);
void (*cp_set_surface_sync)(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr);
void (*set_shaders)(struct radeon_device *rdev);
void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
void (*set_tex_resource)(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size);
void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
int x2, int y2);
void (*draw_auto)(struct radeon_device *rdev);
void (*set_default_state)(struct radeon_device *rdev);
};
 
struct r600_blit {
struct mutex mutex;
struct radeon_bo *shader_obj;
struct r600_blit_cp_primitives primitives;
int max_dim;
int ring_size_common;
int ring_size_per_loop;
u64 shader_gpu_addr;
u32 vs_offset, ps_offset;
u32 state_offset;
u32 state_len;
u32 vb_used, vb_total;
struct radeon_ib *vb_ib;
};
 
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
/*
* SI RLC stuff
*/
struct si_rlc {
/* for power gating */
struct radeon_bo *save_restore_obj;
uint64_t save_restore_gpu_addr;
/* for clear state */
struct radeon_bo *clear_state_obj;
uint64_t clear_state_gpu_addr;
};
 
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_ring_tests(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
void radeon_ring_fini(struct radeon_device *rdev);
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
struct radeon_ring *ring);
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_undo(struct radeon_ring *ring);
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring);
void radeon_ring_lockup_update(struct radeon_ring *ring);
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data);
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data);
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
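/* A minimal sketch of the lock/write/commit pattern behind the per-ring
 * API declared above; ring_submit_sketch and the two packet dwords are
 * placeholders. */
static int ring_submit_sketch(struct radeon_device *rdev,
			      struct radeon_ring *ring)
{
	int r;

	r = radeon_ring_lock(rdev, ring, 2);	/* reserve 2 dwords */
	if (r)
		return r;
	radeon_ring_write(ring, 0x80000000);	/* placeholder packet header */
	radeon_ring_write(ring, 0x00000000);	/* placeholder payload */
	radeon_ring_unlock_commit(rdev, ring);	/* bump wptr and release */
	return 0;
}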
 
 
/*
667,41 → 856,21
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
struct radeon_ib *ib;
int chunk_flags_idx;
int chunk_const_ib_idx;
struct radeon_ib ib;
struct radeon_ib const_ib;
void *track;
unsigned family;
int parser_error;
u32 cs_flags;
u32 ring;
s32 priority;
};
 
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx);
 
 
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
u32 pg_idx, pg_offset;
u32 idx_value = 0;
int new_page;
 
pg_idx = (idx * 4) / PAGE_SIZE;
pg_offset = (idx * 4) % PAGE_SIZE;
 
if (ibc->kpage_idx[0] == pg_idx)
return ibc->kpage[0][pg_offset/4];
if (ibc->kpage_idx[1] == pg_idx)
return ibc->kpage[1][pg_offset/4];
 
new_page = radeon_cs_update_pages(p, pg_idx);
if (new_page < 0) {
p->parser_error = new_page;
return 0;
}
 
idx_value = ibc->kpage[new_page][pg_offset/4];
return idx_value;
}
 
struct radeon_cs_packet {
unsigned idx;
unsigned type;
739,6 → 908,7
};
 
#define RADEON_WB_SCRATCH_OFFSET 0
#define RADEON_WB_RING0_NEXT_RPTR 256
#define RADEON_WB_CP_RPTR_OFFSET 1024
#define RADEON_WB_CP1_RPTR_OFFSET 1280
#define RADEON_WB_CP2_RPTR_OFFSET 1536
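/* A minimal sketch of reading one of the writeback slots above; the
 * offsets are in bytes, so a dword slot is indexed as wb[offset / 4].
 * This assumes struct radeon_wb exposes its CPU pointer as ->wb, as in
 * the upstream driver. */
static u32 wb_read_rptr_sketch(struct radeon_device *rdev)
{
	return le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET / 4]);
}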
831,6 → 1001,7
THERMAL_TYPE_EVERGREEN,
THERMAL_TYPE_SUMO,
THERMAL_TYPE_NI,
THERMAL_TYPE_SI,
};
 
struct radeon_voltage {
868,8 → 1039,7
 
struct radeon_power_state {
enum radeon_pm_state_type type;
/* XXX: use a define for num clock modes */
struct radeon_pm_clock_info clock_info[8];
struct radeon_pm_clock_info *clock_info;
/* number of valid clock modes in this power state */
int num_clock_modes;
struct radeon_pm_clock_info *default_clock_mode;
887,11 → 1057,12
 
struct radeon_pm {
struct mutex mutex;
/* write locked while reprogramming mclk */
struct rw_semaphore mclk_lock;
u32 active_crtcs;
int active_crtc_count;
int req_vblank;
bool vblank_sync;
bool gui_idle;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
939,6 → 1110,17
struct device *int_hwmon_dev;
};
 
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance);
 
struct r600_audio {
int channels;
int rate;
int bits_per_sample;
u8 status_bits;
u8 category_code;
};
/*
* ASIC specific functions.
*/
948,37 → 1130,108
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
bool (*gpu_is_lockup)(struct radeon_device *rdev);
int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
void (*cp_fini)(struct radeon_device *rdev);
void (*cp_disable)(struct radeon_device *rdev);
void (*cp_commit)(struct radeon_device *rdev);
void (*ring_start)(struct radeon_device *rdev);
int (*ring_test)(struct radeon_device *rdev);
void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
/* ioctl hw specific callback. Some hw might want to perform special
 * operations on specific ioctls. For instance, on wait idle some hw
 * might want to perform an HDP flush through MMIO, as it seems that
 * some R6XX/R7XX hw doesn't take HDP flushes into account if programmed
 * through the ring.
 */
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
/* check if 3D engine is idle */
bool (*gui_idle)(struct radeon_device *rdev);
/* wait for mc_idle */
int (*mc_wait_for_idle)(struct radeon_device *rdev);
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
 
u32 pt_ring_index;
void (*set_page)(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
} vm;
/* ring specific callbacks */
struct {
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
int (*cs_parse)(struct radeon_cs_parser *p);
void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp);
bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
} ring[RADEON_NUM_RINGS];
/* irqs */
struct {
int (*set)(struct radeon_device *rdev);
int (*process)(struct radeon_device *rdev);
} irq;
/* displays */
struct {
/* display watermarks */
void (*bandwidth_update)(struct radeon_device *rdev);
/* get frame count */
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
int (*cs_parse)(struct radeon_cs_parser *p);
int (*copy_blit)(struct radeon_device *rdev,
/* wait for vblank */
void (*wait_for_vblank)(struct radeon_device *rdev, int crtc);
/* set backlight level */
void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level);
/* get backlight level */
u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder);
} display;
/* copy functions for bo handling */
struct {
int (*blit)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
int (*copy_dma)(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 blit_ring_index;
int (*dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
unsigned num_gpu_pages,
struct radeon_fence **fence);
u32 dma_ring_index;
/* method used for bo copy */
int (*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
unsigned num_gpu_pages,
struct radeon_fence **fence);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
/* surfaces */
struct {
int (*set_reg)(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void (*clear_reg)(struct radeon_device *rdev, int reg);
} surface;
/* hotplug detect */
struct {
void (*init)(struct radeon_device *rdev);
void (*fini)(struct radeon_device *rdev);
bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
} hpd;
/* power management */
struct {
void (*misc)(struct radeon_device *rdev);
void (*prepare)(struct radeon_device *rdev);
void (*finish)(struct radeon_device *rdev);
void (*init_profile)(struct radeon_device *rdev);
void (*get_dynpm_state)(struct radeon_device *rdev);
uint32_t (*get_engine_clock)(struct radeon_device *rdev);
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
uint32_t (*get_memory_clock)(struct radeon_device *rdev);
986,48 → 1239,22
int (*get_pcie_lanes)(struct radeon_device *rdev);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
/* ioctl hw specific callback. Some hw might want to perform special
 * operations on specific ioctls. For instance, on wait idle some hw
 * might want to perform an HDP flush through MMIO, as it seems that
 * some R6XX/R7XX hw doesn't take HDP flushes into account if programmed
 * through the ring.
 */
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo);
bool (*gui_idle)(struct radeon_device *rdev);
/* power management */
void (*pm_misc)(struct radeon_device *rdev);
void (*pm_prepare)(struct radeon_device *rdev);
void (*pm_finish)(struct radeon_device *rdev);
void (*pm_init_profile)(struct radeon_device *rdev);
void (*pm_get_dynpm_state)(struct radeon_device *rdev);
} pm;
/* pageflipping */
struct {
void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
void (*post_page_flip)(struct radeon_device *rdev, int crtc);
} pflip;
};
 
/*
* Asic structures
*/
struct r100_gpu_lockup {
unsigned long last_jiffies;
u32 last_cp_rptr;
};
 
struct r100_asic {
const unsigned *reg_safe_bm;
unsigned reg_safe_bm_size;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
 
struct r300_asic {
1035,7 → 1262,6
unsigned reg_safe_bm_size;
u32 resync_scratch;
u32 hdp_cntl;
struct r100_gpu_lockup lockup;
};
 
struct r600_asic {
1057,7 → 1283,6
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
 
struct rv770_asic {
1083,7 → 1308,6
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
 
struct evergreen_asic {
1110,7 → 1334,6
unsigned tiling_group_size;
unsigned tile_config;
unsigned backend_map;
struct r100_gpu_lockup lockup;
};
 
struct cayman_asic {
1149,9 → 1372,37
unsigned multi_gpu_tile_size;
 
unsigned tile_config;
struct r100_gpu_lockup lockup;
};
 
struct si_asic {
unsigned max_shader_engines;
unsigned max_tile_pipes;
unsigned max_cu_per_sh;
unsigned max_sh_per_se;
unsigned max_backends_per_se;
unsigned max_texture_channel_caches;
unsigned max_gprs;
unsigned max_gs_threads;
unsigned max_hw_contexts;
unsigned sc_prim_fifo_size_frontend;
unsigned sc_prim_fifo_size_backend;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_size;
 
unsigned num_tile_pipes;
unsigned num_backends_per_se;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
unsigned mem_max_burst_length_bytes;
unsigned mem_row_size_in_kb;
unsigned shader_engine_tile_size;
unsigned num_gpus;
unsigned multi_gpu_tile_size;
 
unsigned tile_config;
};
 
union radeon_asic_config {
struct r300_asic r300;
struct r100_asic r100;
1159,6 → 1410,7
struct rv770_asic rv770;
struct evergreen_asic evergreen;
struct cayman_asic cayman;
struct si_asic si;
};
 
/*
1169,12 → 1421,14
 
 
 
/* VRAM scratch page for HDP bug */
struct r700_vram_scratch {
/* VRAM scratch page for HDP bug, default vram page */
struct r600_vram_scratch {
struct radeon_bo *robj;
volatile uint32_t *ptr;
u64 gpu_addr;
};
 
 
/*
* Core structure, functions and helpers.
*/
1185,6 → 1439,7
struct device *dev;
struct drm_device *ddev;
struct pci_dev *pdev;
struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
enum radeon_family family;
1202,7 → 1457,7
/* Register mmio */
resource_size_t rmmio_base;
resource_size_t rmmio_size;
void *rmmio;
void __iomem *rmmio;
radeon_rreg_t mc_rreg;
radeon_wreg_t mc_wreg;
radeon_rreg_t pll_rreg;
1219,21 → 1474,19
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
struct radeon_fence_driver fence_drv;
struct radeon_cp cp;
/* cayman compute rings */
struct radeon_cp cp1;
struct radeon_cp cp2;
struct radeon_ib_pool ib_pool;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
struct radeon_sa_manager ring_tmp_bo;
struct radeon_irq irq;
struct radeon_asic *asic;
struct radeon_gem gem;
struct radeon_pm pm;
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
struct mutex cs_mutex;
struct radeon_wb wb;
struct radeon_dummy_page dummy_page;
bool gpu_lockup;
bool shutdown;
bool suspend;
bool need_dma32;
1243,28 → 1496,33
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
const struct firmware *rlc_fw; /* r6/700 RLC firmware */
const struct firmware *mc_fw; /* NI MC firmware */
const struct firmware *ce_fw; /* SI CE firmware */
struct r600_blit r600_blit;
struct r600_blit r600_video;
struct r700_vram_scratch vram_scratch;
struct r600_vram_scratch vram_scratch;
int msi_enabled; /* msi enabled */
struct r600_ih ih; /* r6/700 interrupt ring */
struct si_rlc rlc;
// struct work_struct hotplug_work;
// struct work_struct audio_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
struct mutex vram_mutex;
 
/* audio stuff */
bool audio_enabled;
// struct timer_list audio_timer;
int audio_channels;
int audio_rate;
int audio_bits_per_sample;
uint8_t audio_status_bits;
uint8_t audio_category_code;
 
 
// struct r600_audio audio_status; /* audio stuff */
// struct notifier_block acpi_nb;
/* only one userspace can use Hyperz features or CMASK at a time */
// struct drm_file *hyperz_filp;
// struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
/* debugfs */
// struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
unsigned debugfs_count;
/* virtual memory */
struct radeon_vm_manager vm_manager;
struct mutex gpu_clock_mutex;
/* ACPI interface */
// struct radeon_atif atif;
// struct radeon_atcs atcs;
};
 
int radeon_device_init(struct radeon_device *rdev,
1274,46 → 1532,11
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);
 
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
if (reg < rdev->rmmio_size)
return readl(((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}
}
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg);
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v);
 
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
if (reg < rdev->rmmio_size)
writel(v, ((void __iomem *)rdev->rmmio) + reg);
else {
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
}
}
 
static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
if (reg < rdev->rio_mem_size)
return ioread32(rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
return ioread32(rdev->rio_mem + RADEON_MM_DATA);
}
}
 
static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
if (reg < rdev->rio_mem_size)
iowrite32(v, rdev->rio_mem + reg);
else {
iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
}
}
 
/*
* Cast helper
*/
1322,10 → 1545,10
/*
* Registers read & write functions.
*/
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg))
#define RREG8(reg) readb((rdev->rmmio) + (reg))
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg))
#define RREG16(reg) readw((rdev->rmmio) + (reg))
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg))
#define RREG32(reg) r100_mm_rreg(rdev, (reg))
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg)))
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
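/* A minimal sketch of a masked read-modify-write built from the RREG32
 * and WREG32 accessors above; the rdev parameter name matters because
 * the macros reference it implicitly. */
static inline void wreg32_masked_sketch(struct radeon_device *rdev,
					u32 reg, u32 val, u32 mask)
{
	u32 tmp = RREG32(reg);

	tmp &= ~mask;
	tmp |= val & mask;
	WREG32(reg, tmp);
}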
1417,6 → 1640,9
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \
(rdev->flags & RADEON_IS_IGP))
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS))
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA))
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
(rdev->flags & RADEON_IS_IGP))
 
/*
* BIOS helpers.
1434,20 → 1660,19
/*
* RING helpers.
*/
static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#if DRM_DEBUG_CODE == 0
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
if (rdev->cp.count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
rdev->cp.ring[rdev->cp.wptr++] = v;
rdev->cp.wptr &= rdev->cp.ptr_mask;
rdev->cp.count_dw--;
rdev->cp.ring_free_dw--;
}
 
 
/*
* ASICs macro.
*/
1455,53 → 1680,64
#define radeon_fini(rdev) (rdev)->asic->fini((rdev))
#define radeon_resume(rdev) (rdev)->asic->resume((rdev))
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp))
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm))
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc))
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l))
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev))
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e))
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s)))
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r)))
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev))
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev))
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev))
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h))
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h))
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev))
#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc))
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev))
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev))
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev))
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev))
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev))
#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc))
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base))
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc))
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc))
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev))
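/* A minimal sketch of what the ring macros above expand to: a lookup in
 * the per-ring callback table keyed by ring index. */
static int asic_ring_test_sketch(struct radeon_device *rdev, int ridx)
{
	/* radeon_ring_test(rdev, ridx, ring) expands to
	 * (rdev)->asic->ring[ridx].ring_test(rdev, ring) */
	return radeon_ring_test(rdev, ridx, &rdev->ring[ridx]);
}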
 
/* Common functions */
/* AGP */
extern int radeon_gpu_reset(struct radeon_device *rdev);
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
1525,12 → 1761,91
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
 
/*
* vm
*/
int radeon_vm_manager_init(struct radeon_device *rdev);
void radeon_vm_manager_fini(struct radeon_device *rdev);
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct ttm_mem_reg *mem);
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo *bo);
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo);
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t offset,
uint32_t flags);
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va);
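/* A minimal sketch of the add/set-addr/remove flow for mapping a BO into
 * a VM with the helpers above; va_offset and flags are caller-supplied
 * placeholders. */
static int vm_map_bo_sketch(struct radeon_device *rdev,
			    struct radeon_vm *vm, struct radeon_bo *bo,
			    uint64_t va_offset, uint32_t flags)
{
	struct radeon_bo_va *bo_va;
	int r;

	bo_va = radeon_vm_bo_add(rdev, vm, bo);
	if (bo_va == NULL)
		return -ENOMEM;
	r = radeon_vm_bo_set_addr(rdev, bo_va, va_offset, flags);
	if (r)
		radeon_vm_bo_rmv(rdev, bo_va);	/* undo the add on failure */
	return r;
}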
 
/* audio */
void r600_audio_update_hdmi(struct work_struct *work);
 
/*
* R600 vram scratch functions
*/
int r600_vram_scratch_init(struct radeon_device *rdev);
void r600_vram_scratch_fini(struct radeon_device *rdev);
 
/*
* r600 cs checking helper
*/
unsigned r600_mip_minify(unsigned size, unsigned level);
bool r600_fmt_is_valid_color(u32 format);
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family);
int r600_fmt_get_blocksize(u32 format);
int r600_fmt_get_nblocksx(u32 format, u32 w);
int r600_fmt_get_nblocksy(u32 format, u32 h);
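/* A minimal sketch of sizing one mip level with the cs checking helpers
 * above: minify the base dimensions, convert to blocks, multiply by the
 * block size. Assumes the helpers keep their upstream block semantics. */
static unsigned r600_mip_bytes_sketch(u32 format, unsigned w, unsigned h,
				      unsigned level,
				      enum radeon_family family)
{
	unsigned lw, lh;

	if (!r600_fmt_is_valid_texture(format, family))
		return 0;
	lw = r600_mip_minify(w, level);		/* width of this level */
	lh = r600_mip_minify(h, level);		/* height of this level */
	return r600_fmt_get_nblocksx(format, lw) *
	       r600_fmt_get_nblocksy(format, lh) *
	       r600_fmt_get_blocksize(format);
}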
 
/*
* r600 functions used by radeon_encoder.c
*/
struct radeon_hdmi_acr {
u32 clock;
 
int n_32khz;
int cts_32khz;
 
int n_44_1khz;
int cts_44_1khz;
 
int n_48khz;
int cts_48khz;
 
};
 
extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
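/* A minimal sketch of consuming the ACR table above: r600_hdmi_acr()
 * returns the N/CTS pairs for a clock, and the caller picks the pair
 * matching the audio sample rate (48 kHz here, as an example). */
static void hdmi_acr_sketch(uint32_t clock, int *n, int *cts)
{
	struct radeon_hdmi_acr acr = r600_hdmi_acr(clock);

	*n = acr.n_48khz;	/* audio N for 48 kHz streams */
	*cts = acr.cts_48khz;	/* matching CTS value */
}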
 
extern void r600_hdmi_enable(struct drm_encoder *encoder);
extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
u32 tiling_pipe_num,
u32 max_rb_num,
u32 total_max_rb_num,
u32 enabled_rb_mask);
 
/*
* evergreen functions used by radeon_encoder.c
*/
 
extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
 
extern int ni_init_microcode(struct radeon_device *rdev);
extern int ni_mc_load_microcode(struct radeon_device *rdev);
 
1537,8 → 1852,10
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);
#else
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
 
#include "radeon_object.h"
1555,22 → 1872,4
 
 
 
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);
 
/*
* The first word is the work queue pointer and the flags rolled into
* one
*/
#define work_data_bits(work) ((unsigned long *)(&(work)->data))
 
struct work_struct {
atomic_long_t data;
#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
struct list_head entry;
work_func_t func;
};
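/* A minimal sketch of wiring a handler into this port's work_struct;
 * assumes the usual INIT_LIST_HEAD from the list helpers this header
 * already relies on. r600_audio_update_hdmi (declared earlier) is one
 * such handler. */
static inline void init_work_sketch(struct work_struct *work, work_func_t fn)
{
	INIT_LIST_HEAD(&work->entry);	/* not queued anywhere yet */
	work->func = fn;		/* e.g. r600_audio_update_hdmi */
}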
 
#endif
/drivers/video/drm/radeon/radeon_asic.c
40,6 → 40,16
/*
* Registers accessors functions.
*/
/**
* radeon_invalid_rreg - dummy reg read function
*
* @rdev: radeon device pointer
* @reg: offset of register
*
* Dummy register read function. Used for register blocks
* that certain asics don't have (all asics).
* Returns the value in the register.
*/
static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
47,6 → 57,16
return 0;
}
 
/**
* radeon_invalid_wreg - dummy reg write function
*
* @rdev: radeon device pointer
* @reg: offset of register
* @v: value to write to the register
*
* Dummy register write function. Used for register blocks
* that certain asics don't have (all asics).
*/
static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
54,6 → 74,14
BUG_ON(1);
}
 
/**
* radeon_register_accessor_init - sets up the register accessor callbacks
*
* @rdev: radeon device pointer
*
* Sets up the register accessor callbacks for various register
* apertures. Not all asics have all apertures (all asics).
*/
static void radeon_register_accessor_init(struct radeon_device *rdev)
{
rdev->mc_rreg = &radeon_invalid_rreg;
102,6 → 130,14
 
 
/* helper to disable agp */
/**
* radeon_agp_disable - AGP disable helper function
*
* @rdev: radeon device pointer
*
* Removes AGP flags and changes the gart callbacks on AGP
* cards when using the internal gart rather than AGP (all asics).
*/
void radeon_agp_disable(struct radeon_device *rdev)
{
rdev->flags &= ~RADEON_IS_AGP;
114,13 → 150,13
rdev->family == CHIP_R423) {
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart_set_page = &r100_pci_gart_set_page;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
134,38 → 170,74
// .suspend = &r100_suspend,
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r100_cs_parse,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = NULL,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
static struct radeon_asic r200_asic = {
174,37 → 246,74
// .suspend = &r100_suspend,
// .resume = &r100_resume,
// .vga_set_state = &r100_vga_set_state,
.gpu_is_lockup = &r100_gpu_is_lockup,
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r100_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r100_cs_parse,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r100_fence_ring_emit,
// .cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
static struct radeon_asic r300_asic = {
214,36 → 323,73
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
static struct radeon_asic r300_asic_pcie = {
253,35 → 399,73
// .resume = &r300_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
.init = &r100_hpd_init,
.fini = &r100_hpd_fini,
.sense = &r100_hpd_sense,
.set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
static struct radeon_asic r420_asic = {
291,36 → 475,73
// .resume = &r420_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
static struct radeon_asic rs400_asic = {
330,36 → 551,73
// .resume = &rs400_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.set_page = &rs400_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
// .get_vblank_counter = &r100_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &r100_irq_set,
.process = &r100_irq_process,
},
.display = {
.bandwidth_update = &r100_bandwidth_update,
.get_vblank_counter = &r100_get_vblank_counter,
.wait_for_vblank = &r100_wait_for_vblank,
// .set_backlight_level = &radeon_legacy_set_backlight_level,
// .get_backlight_level = &radeon_legacy_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &r100_bandwidth_update,
.hpd_init = &r100_hpd_init,
.hpd_fini = &r100_hpd_fini,
.hpd_sense = &r100_hpd_sense,
.hpd_set_polarity = &r100_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &r100_hpd_init,
// .fini = &r100_hpd_fini,
// .sense = &r100_hpd_sense,
// .set_polarity = &r100_hpd_set_polarity,
},
.pm = {
// .misc = &r100_pm_misc,
// .prepare = &r100_pm_prepare,
// .finish = &r100_pm_finish,
// .init_profile = &r100_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_legacy_get_engine_clock,
// .set_engine_clock = &radeon_legacy_set_engine_clock,
// .get_memory_clock = &radeon_legacy_get_memory_clock,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_legacy_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &r100_pre_page_flip,
// .page_flip = &r100_page_flip,
// .post_page_flip = &r100_post_page_flip,
},
};
 
static struct radeon_asic rs600_asic = {
369,36 → 627,73
// .resume = &rs600_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.bandwidth_update = &rs600_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs600_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic rs690_asic = {
408,36 → 703,73
// .resume = &rs690_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
.set_page = &rs400_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.get_vblank_counter = &rs600_get_vblank_counter,
.bandwidth_update = &rs690_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r200_copy_dma,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
.sense = &rs600_hpd_sense,
.set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic rv515_asic = {
447,36 → 779,73
// .resume = &rv515_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.get_vblank_counter = &rs600_get_vblank_counter,
.bandwidth_update = &rv515_bandwidth_update,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic r520_asic = {
486,36 → 855,73
// .resume = &r520_resume,
// .vga_set_state = &r100_vga_set_state,
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
.cp_commit = &r100_cp_commit,
// .ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r100_ring_ib_execute,
.emit_fence = &r300_fence_ring_emit,
.emit_semaphore = &r100_semaphore_ring_emit,
// .cs_parse = &r300_cs_parse,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
.ring_ib_execute = &r100_ring_ib_execute,
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
// .get_vblank_counter = &rs600_get_vblank_counter,
.fence_ring_emit = &r300_fence_ring_emit,
// .cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
.ib_test = &r100_ib_test,
.is_lockup = &r100_gpu_is_lockup,
}
},
.irq = {
.set = &rs600_irq_set,
.process = &rs600_irq_process,
},
.display = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r100_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &r200_copy_dma,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
.clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
.hpd_sense = &rs600_hpd_sense,
.hpd_set_polarity = &rs600_hpd_set_polarity,
.ioctl_wait_idle = NULL,
.gui_idle = &r100_gui_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r100_set_surface_reg,
.clear_reg = r100_clear_surface_reg,
},
.hpd = {
// .init = &rs600_hpd_init,
// .fini = &rs600_hpd_fini,
// .sense = &rs600_hpd_sense,
// .set_polarity = &rs600_hpd_set_polarity,
},
.pm = {
// .misc = &rs600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r420_pm_init_profile,
// .get_dynpm_state = &r100_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &rv370_get_pcie_lanes,
// .set_pcie_lanes = &rv370_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic r600_asic = {
523,35 → 929,74
// .fini = &r600_fini,
// .suspend = &r600_suspend,
// .resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
// .vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
// .misc = &r600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic rs780_asic = {
559,35 → 1004,74
// .fini = &r600_fini,
// .suspend = &r600_suspend,
// .resume = &r600_resume,
.cp_commit = &r600_cp_commit,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
// .vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.bandwidth_update = &rs690_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rs690_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
// .misc = &r600_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &rs780_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = NULL,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rs600_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic rv770_asic = {
595,34 → 1079,74
// .fini = &rv770_fini,
// .suspend = &rv770_suspend,
// .resume = &rv770_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &r600_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &r600_mc_wait_for_idle,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &r600_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &r600_cs_parse,
.ring_test = &r600_ring_test,
.ring_ib_execute = &r600_ring_ib_execute,
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
.ib_test = &r600_ib_test,
.is_lockup = &r600_gpu_is_lockup,
}
},
.irq = {
.set = &r600_irq_set,
.process = &r600_irq_process,
},
.display = {
.bandwidth_update = &rv515_bandwidth_update,
.get_vblank_counter = &rs600_get_vblank_counter,
.wait_for_vblank = &avivo_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
.hpd_init = &r600_hpd_init,
.hpd_fini = &r600_hpd_fini,
.hpd_sense = &r600_hpd_sense,
.hpd_set_polarity = &r600_hpd_set_polarity,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &r600_hpd_init,
// .fini = &r600_hpd_fini,
// .sense = &r600_hpd_sense,
// .set_polarity = &r600_hpd_set_polarity,
},
.pm = {
// .misc = &rv770_pm_misc,
// .prepare = &rs600_pm_prepare,
// .finish = &rs600_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
// .set_clock_gating = &radeon_atom_set_clock_gating,
},
.pflip = {
// .pre_page_flip = &rs600_pre_page_flip,
// .page_flip = &rv770_page_flip,
// .post_page_flip = &rs600_post_page_flip,
},
};
 
static struct radeon_asic evergreen_asic = {
630,35 → 1154,74
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
.copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = &r600_get_pcie_lanes,
.set_pcie_lanes = &r600_set_pcie_lanes,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
 
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &r600_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = &r600_get_pcie_lanes,
// .set_pcie_lanes = &r600_set_pcie_lanes,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic sumo_asic = {
666,34 → 1229,74
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &r600_cs_parse,
.copy_blit = &evergreen_copy_blit,
.copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = NULL,
.set_memory_clock = NULL,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
},
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = NULL,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic btc_asic = {
701,71 → 1304,401
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &evergreen_ring_ib_execute,
.emit_fence = &r600_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
.copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &btc_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
// .fini = &evergreen_fini,
// .suspend = &evergreen_suspend,
// .resume = &evergreen_resume,
.cp_commit = &r600_cp_commit,
// .fini = &cayman_fini,
// .suspend = &cayman_suspend,
// .resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ring_ib_execute = &evergreen_ring_ib_execute,
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.fence_ring_emit = &r600_fence_ring_emit,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.copy_blit = &evergreen_copy_blit,
.copy_dma = NULL,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
.get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &evergreen_bandwidth_update,
.hpd_init = &evergreen_hpd_init,
.hpd_fini = &evergreen_hpd_fini,
.hpd_sense = &evergreen_hpd_sense,
.hpd_set_polarity = &evergreen_hpd_set_polarity,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &btc_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic trinity_asic = {
.init = &cayman_init,
// .fini = &cayman_fini,
// .suspend = &cayman_suspend,
// .resume = &cayman_resume,
.asic_reset = &cayman_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &cayman_vm_init,
.fini = &cayman_vm_fini,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &cayman_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &cayman_ring_ib_execute,
// .ib_parse = &evergreen_ib_parse,
.emit_fence = &cayman_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = &evergreen_cs_parse,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &evergreen_gpu_is_lockup,
.vm_flush = &cayman_vm_flush,
}
},
.irq = {
.set = &evergreen_irq_set,
.process = &evergreen_irq_process,
},
.display = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = &r600_copy_blit,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = &r600_copy_blit,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
// .init = &evergreen_hpd_init,
// .fini = &evergreen_hpd_fini,
// .sense = &evergreen_hpd_sense,
// .set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = NULL,
// .set_memory_clock = NULL,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
static struct radeon_asic si_asic = {
.init = &si_init,
// .fini = &si_fini,
// .suspend = &si_suspend,
// .resume = &si_resume,
.asic_reset = &si_asic_reset,
// .vga_set_state = &r600_vga_set_state,
// .ioctl_wait_idle = r600_ioctl_wait_idle,
.gui_idle = &r600_gui_idle,
.mc_wait_for_idle = &evergreen_mc_wait_for_idle,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
.set_page = &rs600_gart_set_page,
},
.vm = {
.init = &si_vm_init,
.fini = &si_vm_fini,
.pt_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.set_page = &si_vm_set_page,
},
.ring = {
[RADEON_RING_TYPE_GFX_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP1_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
},
[CAYMAN_RING_TYPE_CP2_INDEX] = {
.ib_execute = &si_ring_ib_execute,
// .ib_parse = &si_ib_parse,
.emit_fence = &si_fence_ring_emit,
.emit_semaphore = &r600_semaphore_ring_emit,
// .cs_parse = NULL,
.ring_test = &r600_ring_test,
.ib_test = &r600_ib_test,
.is_lockup = &si_gpu_is_lockup,
.vm_flush = &si_vm_flush,
}
},
.irq = {
.set = &si_irq_set,
.process = &si_irq_process,
},
.display = {
.bandwidth_update = &dce6_bandwidth_update,
.get_vblank_counter = &evergreen_get_vblank_counter,
.wait_for_vblank = &dce4_wait_for_vblank,
// .set_backlight_level = &atombios_set_backlight_level,
// .get_backlight_level = &atombios_get_backlight_level,
},
.copy = {
.blit = NULL,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = NULL,
.dma_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.copy = NULL,
.copy_ring_index = RADEON_RING_TYPE_GFX_INDEX,
},
.surface = {
.set_reg = r600_set_surface_reg,
.clear_reg = r600_clear_surface_reg,
},
.hpd = {
.init = &evergreen_hpd_init,
.fini = &evergreen_hpd_fini,
.sense = &evergreen_hpd_sense,
.set_polarity = &evergreen_hpd_set_polarity,
},
.pm = {
// .misc = &evergreen_pm_misc,
// .prepare = &evergreen_pm_prepare,
// .finish = &evergreen_pm_finish,
// .init_profile = &sumo_pm_init_profile,
// .get_dynpm_state = &r600_pm_get_dynpm_state,
// .get_engine_clock = &radeon_atom_get_engine_clock,
// .set_engine_clock = &radeon_atom_set_engine_clock,
// .get_memory_clock = &radeon_atom_get_memory_clock,
// .set_memory_clock = &radeon_atom_set_memory_clock,
// .get_pcie_lanes = NULL,
// .set_pcie_lanes = NULL,
// .set_clock_gating = NULL,
},
.pflip = {
// .pre_page_flip = &evergreen_pre_page_flip,
// .page_flip = &evergreen_page_flip,
// .post_page_flip = &evergreen_post_page_flip,
},
};
 
/**
* radeon_asic_init - register asic-specific callbacks
*
* @rdev: radeon device pointer
*
* Registers the appropriate asic-specific callbacks for each
* chip family. Also sets other asic-specific info, such as the number
* of crtcs and the register aperture accessors (all asics).
* Returns 0 on success.
*/
int radeon_asic_init(struct radeon_device *rdev)
{
radeon_register_accessor_init(rdev);
805,10 → 1738,11
rdev->asic = &r420_asic;
/* handle macs */
if (rdev->bios == NULL) {
rdev->asic->get_engine_clock = &radeon_legacy_get_engine_clock;
rdev->asic->set_engine_clock = &radeon_legacy_set_engine_clock;
rdev->asic->get_memory_clock = &radeon_legacy_get_memory_clock;
rdev->asic->set_memory_clock = NULL;
rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock;
rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock;
rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock;
rdev->asic->pm.set_memory_clock = NULL;
rdev->asic->display.set_backlight_level = &radeon_legacy_set_backlight_level;
}
break;
case CHIP_RS400:
882,6 → 1816,18
/* set num crtcs */
rdev->num_crtc = 6;
break;
case CHIP_ARUBA:
rdev->asic = &trinity_asic;
/* set num crtcs */
rdev->num_crtc = 4;
break;
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
rdev->asic = &si_asic;
/* set num crtcs */
rdev->num_crtc = 6;
break;
default:
/* FIXME: not supported yet */
return -EINVAL;
888,8 → 1834,8
}
 
if (rdev->flags & RADEON_IS_IGP) {
rdev->asic->get_memory_clock = NULL;
rdev->asic->set_memory_clock = NULL;
rdev->asic->pm.get_memory_clock = NULL;
rdev->asic->pm.set_memory_clock = NULL;
}
 
return 0;
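The rework above replaces the old flat callback fields (.gart_tlb_flush, .cp_commit, ...) with grouped sub-tables (.gart, .ring, .irq, .display, .copy, .surface, .hpd, .pm, .pflip) that radeon_asic_init() selects per chip family. A minimal sketch of how such a table is consumed, assuming wrapper macros modeled on the driver's radeon_xxx() dispatch helpers — the struct layout and macro names here are illustrative, not the exact radeon.h definitions:

/* Sketch only: a trimmed-down callback table plus the dispatch-macro
 * pattern the driver uses; names are illustrative. */
struct radeon_device;

struct radeon_asic_gart {
	void (*tlb_flush)(struct radeon_device *rdev);
};

struct radeon_asic {
	int (*asic_reset)(struct radeon_device *rdev);
	struct radeon_asic_gart gart;
};

struct radeon_device {
	struct radeon_asic *asic;	/* chosen once by radeon_asic_init() */
};

/* Callers go through wrappers, never through the table directly, so a
 * new chip family only needs a new radeon_asic instance. */
#define radeon_asic_reset(rdev)      ((rdev)->asic->asic_reset((rdev)))
#define radeon_gart_tlb_flush(rdev)  ((rdev)->asic->gart.tlb_flush((rdev)))

Grouping the callbacks this way also keeps late fixups local to one sub-table, as in the IGP case above where only rdev->asic->pm.get_memory_clock and rdev->asic->pm.set_memory_clock are nulled out.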
/drivers/video/drm/radeon/radeon_asic.h
42,6 → 42,12
void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock);
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
 
void atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
u8 atombios_get_backlight_level(struct radeon_encoder *radeon_encoder);
void radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level);
u8 radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder);
 
 
/*
* r100,rv100,rs100,rv200,rs200
*/
58,17 → 64,20
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
bool r100_gpu_is_lockup(struct radeon_device *rdev);
bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
void r100_cp_commit(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void r100_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
75,8 → 84,8
int r100_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
unsigned num_gpu_pages,
struct radeon_fence **fence);
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
83,7 → 92,7
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
100,13 → 109,7
void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
struct radeon_cp *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
struct radeon_cp *cp);
void r100_ib_fini(struct radeon_device *rdev);
int r100_ib_init(struct radeon_device *rdev);
int r100_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
136,6 → 139,8
extern void r100_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 r100_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void r100_post_page_flip(struct radeon_device *rdev, int crtc);
extern void r100_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int r100_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* r200,rv250,rs300,rv280
143,8 → 148,8
extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
unsigned num_gpu_pages,
struct radeon_fence **fence);
void r200_set_safe_registers(struct radeon_device *rdev);
 
/*
154,9 → 159,8
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
173,6 → 177,7
extern void rv370_pcie_gart_fini(struct radeon_device *rdev);
extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* r420,r423,rv410
203,6 → 208,7
void rs400_gart_adjust_size(struct radeon_device *rdev);
void rs400_gart_disable(struct radeon_device *rdev);
void rs400_gart_fini(struct radeon_device *rdev);
extern int rs400_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* rs600.
233,8 → 239,9
extern u32 rs600_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void rs600_post_page_flip(struct radeon_device *rdev, int crtc);
void rs600_set_safe_registers(struct radeon_device *rdev);
extern void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc);
extern int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
 
/*
* rs690,rs740
*/
248,23 → 255,21
void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode1,
struct drm_display_mode *mode2);
extern int rs690_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* rv515
*/
struct rv515_mc_save {
u32 d1vga_control;
u32 d2vga_control;
u32 vga_render_control;
u32 vga_hdp_control;
u32 d1crtc_control;
u32 d2crtc_control;
};
 
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_ring_start(struct radeon_device *rdev);
void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
275,13 → 280,14
void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
void rv515_clock_startup(struct radeon_device *rdev);
void rv515_debugfs(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
 
/*
* r520,rv530,rv560,rv570,r580
*/
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
int r520_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
293,7 → 299,6
void r600_vga_set_state(struct radeon_device *rdev, bool state);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
void r600_cp_commit(struct radeon_device *rdev);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
300,18 → 305,22
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
bool r600_gpu_is_lockup(struct radeon_device *rdev);
void r600_semaphore_ring_emit(struct radeon_device *rdev,
struct radeon_ring *cp,
struct radeon_semaphore *semaphore,
bool emit_wait);
bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
int r600_ib_test(struct radeon_device *rdev);
int r600_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
unsigned num_gpu_pages, struct radeon_fence **fence);
void r600_hpd_init(struct radeon_device *rdev);
void r600_hpd_fini(struct radeon_device *rdev);
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
328,7 → 337,7
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);
349,26 → 358,23
void r600_rlc_stop(struct radeon_device *rdev);
/* r600 audio */
int r600_audio_init(struct radeon_device *rdev);
int r600_audio_tmds_index(struct drm_encoder *encoder);
void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
int r600_audio_channels(struct radeon_device *rdev);
int r600_audio_bits_per_sample(struct radeon_device *rdev);
int r600_audio_rate(struct radeon_device *rdev);
uint8_t r600_audio_status_bits(struct radeon_device *rdev);
uint8_t r600_audio_category_code(struct radeon_device *rdev);
void r600_audio_schedule_polling(struct radeon_device *rdev);
void r600_audio_enable_polling(struct drm_encoder *encoder);
void r600_audio_disable_polling(struct drm_encoder *encoder);
struct r600_audio r600_audio_status(struct radeon_device *rdev);
void r600_audio_fini(struct radeon_device *rdev);
void r600_hdmi_init(struct drm_encoder *encoder);
int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
void r600_hdmi_update_audio_settings(struct drm_encoder *encoder);
/* r600 blit */
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages,
struct radeon_fence **fence, struct radeon_sa_bo **vb,
struct radeon_semaphore **sem);
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence **fence,
struct radeon_sa_bo *vb, struct radeon_semaphore *sem);
void r600_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
unsigned num_gpu_pages,
struct radeon_sa_bo *vb);
int r600_mc_wait_for_idle(struct radeon_device *rdev);
uint64_t r600_get_gpu_clock(struct radeon_device *rdev);
 
/*
* rv770,rv730,rv710,rv740
387,23 → 393,20
* evergreen
*/
struct evergreen_mc_save {
u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
u32 crtc_control[6];
bool crtc_enabled[RADEON_MAX_CRTCS];
};
 
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int evergreen_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_pages, struct radeon_fence *fence);
void evergreen_hpd_init(struct radeon_device *rdev);
void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
416,28 → 419,62
extern void evergreen_pm_misc(struct radeon_device *rdev);
extern void evergreen_pm_prepare(struct radeon_device *rdev);
extern void evergreen_pm_finish(struct radeon_device *rdev);
extern void sumo_pm_init_profile(struct radeon_device *rdev);
extern void btc_pm_init_profile(struct radeon_device *rdev);
extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
extern void dce4_wait_for_vblank(struct radeon_device *rdev, int crtc);
void evergreen_disable_interrupt_state(struct radeon_device *rdev);
int evergreen_blit_init(struct radeon_device *rdev);
void evergreen_blit_fini(struct radeon_device *rdev);
/* evergreen blit */
int evergreen_blit_prepare_copy(struct radeon_device *rdev, int size_bytes);
void evergreen_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence);
void evergreen_kms_blit_copy(struct radeon_device *rdev,
u64 src_gpu_addr, u64 dst_gpu_addr,
int size_bytes);
int evergreen_mc_wait_for_idle(struct radeon_device *rdev);
 
/*
* cayman
*/
void cayman_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
bool cayman_gpu_is_lockup(struct radeon_device *rdev);
int cayman_asic_reset(struct radeon_device *rdev);
void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int cayman_vm_init(struct radeon_device *rdev);
void cayman_vm_fini(struct radeon_device *rdev);
void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
 
/* DCE6 - SI */
void dce6_bandwidth_update(struct radeon_device *rdev);
 
/*
* si
*/
void si_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
void si_pcie_gart_tlb_flush(struct radeon_device *rdev);
int si_init(struct radeon_device *rdev);
void si_fini(struct radeon_device *rdev);
int si_suspend(struct radeon_device *rdev);
int si_resume(struct radeon_device *rdev);
bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int si_asic_reset(struct radeon_device *rdev);
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int si_irq_set(struct radeon_device *rdev);
int si_irq_process(struct radeon_device *rdev);
int si_vm_init(struct radeon_device *rdev);
void si_vm_fini(struct radeon_device *rdev);
void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags);
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
uint64_t si_get_gpu_clock(struct radeon_device *rdev);
 
#endif
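Note the signature change that runs through this header: the copy/blit entry points now take unsigned num_gpu_pages and a struct radeon_fence **fence instead of a caller-allocated fence, so the callee emits the fence and hands it back. A hedged sketch of a caller under the new signature — the GPU addresses, page count, and function name are placeholders, and radeon_fence_wait()/radeon_fence_unref() are assumed to be the driver's existing fence helpers:

/* Illustrative caller of the reworked copy API declared above;
 * assumes the driver's radeon.h context. */
static int example_copy(struct radeon_device *rdev,
			uint64_t src_gpu_addr, uint64_t dst_gpu_addr,
			unsigned num_gpu_pages)
{
	struct radeon_fence *fence = NULL;
	int r;

	/* The callee schedules the blit on its ring and returns the
	 * emitted fence through the double pointer. */
	r = r600_copy_blit(rdev, src_gpu_addr, dst_gpu_addr,
			   num_gpu_pages, &fence);
	if (r)
		return r;

	/* Block until the copy retires, then drop our reference. */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}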
/drivers/video/drm/radeon/radeon_atombios.c
23,8 → 23,8
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
 
#include "atom.h"
56,6 → 56,10
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum,
uint32_t supported_device);
 
/* local */
static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
u16 voltage_id, u16 *voltage);
 
union atom_supported_devices {
struct _ATOM_SUPPORTED_DEVICES_INFO info;
struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2;
62,32 → 66,25
struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
};
 
static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
ATOM_GPIO_I2C_ASSIGMENT *gpio,
u8 index)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset, size;
int i, num_indices;
/* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
if ((rdev->family == CHIP_R420) ||
(rdev->family == CHIP_R423) ||
(rdev->family == CHIP_RV410)) {
if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
gpio->ucClkMaskShift = 0x19;
gpio->ucDataMaskShift = 0x18;
}
}
 
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
 
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
 
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
 
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
if ((index == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
100,13 → 97,19
 
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
if ((index == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
}
}
 
if (gpio->sucI2cId.ucAccess == id) {
static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
struct radeon_i2c_bus_rec i2c;
 
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
138,15 → 141,14
 
if (i2c.mask_clk_reg)
i2c.valid = true;
break;
}
}
}
else
i2c.valid = false;
 
return i2c;
}
 
void radeon_atombios_i2c_init(struct radeon_device *rdev)
static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
uint8_t id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
ATOM_GPIO_I2C_ASSIGMENT *gpio;
155,9 → 157,9
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset, size;
int i, num_indices;
char stmp[32];
 
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
 
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
167,60 → 169,44
 
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
i2c.valid = false;
 
/* some evergreen boards have bad data for this entry */
if (ASIC_IS_DCE4(rdev)) {
if ((i == 7) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
(gpio->sucI2cId.ucAccess == 0)) {
gpio->sucI2cId.ucAccess = 0x97;
gpio->ucDataMaskShift = 8;
gpio->ucDataEnShift = 8;
gpio->ucDataY_Shift = 8;
gpio->ucDataA_Shift = 8;
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
if (gpio->sucI2cId.ucAccess == id) {
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
break;
}
}
}
 
/* some DCE3 boards have bad data for this entry */
if (ASIC_IS_DCE3(rdev)) {
if ((i == 4) &&
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
(gpio->sucI2cId.ucAccess == 0x94))
gpio->sucI2cId.ucAccess = 0x14;
return i2c;
}
 
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
i2c.en_data_mask = (1 << gpio->ucDataEnShift);
i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
void radeon_atombios_i2c_init(struct radeon_device *rdev)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
ATOM_GPIO_I2C_ASSIGMENT *gpio;
struct radeon_i2c_bus_rec i2c;
int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
struct _ATOM_GPIO_I2C_INFO *i2c_info;
uint16_t data_offset, size;
int i, num_indices;
char stmp[32];
 
if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
i2c.hw_capable = true;
else
i2c.hw_capable = false;
if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
if (gpio->sucI2cId.ucAccess == 0xa0)
i2c.mm_i2c = true;
else
i2c.mm_i2c = false;
num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
sizeof(ATOM_GPIO_I2C_ASSIGMENT);
 
i2c.i2c_id = gpio->sucI2cId.ucAccess;
for (i = 0; i < num_indices; i++) {
gpio = &i2c_info->asGPIO_Info[i];
 
if (i2c.mask_clk_reg) {
i2c.valid = true;
radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
 
if (i2c.valid) {
sprintf(stmp, "0x%x", i2c.i2c_id);
rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
}
228,7 → 214,7
}
}
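
The refactor in the hunks above splits what used to be one monolithic loop: radeon_lookup_i2c_gpio_quirks() patches known-bad BIOS entries in place, radeon_get_bus_rec_for_i2c_gpio() decodes an entry into a radeon_i2c_bus_rec, and both radeon_lookup_i2c_gpio() and radeon_atombios_i2c_init() now share those helpers instead of duplicating the decode. A minimal standalone sketch of the shape (the structs and quirk values are simplified stand-ins, not the real AtomBIOS layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gpio_entry { uint16_t clk_mask_reg; uint8_t access; };
struct bus_rec    { bool valid; uint16_t mask_clk_reg; uint8_t id; };

/* board fixups, keyed on the table index, applied before decoding */
static void apply_quirks(struct gpio_entry *g, int index)
{
	if (index == 0 && g->clk_mask_reg == 0x1936 && g->access == 0)
		g->access = 0x97;	/* stand-in for the evergreen fixup */
}

/* decode one raw table entry into a driver-side record */
static struct bus_rec decode(const struct gpio_entry *g)
{
	struct bus_rec r;

	memset(&r, 0, sizeof(r));
	r.mask_clk_reg = (uint16_t)(g->clk_mask_reg * 4);
	r.id = g->access;
	r.valid = r.mask_clk_reg != 0;
	return r;
}

int main(void)
{
	struct gpio_entry table[2] = { { 0x1936, 0 }, { 0x1fda, 0x94 } };
	int i;

	for (i = 0; i < 2; i++) {
		struct bus_rec r;

		apply_quirks(&table[i], i);	/* fix bad BIOS data first */
		r = decode(&table[i]);		/* then build the record */
		if (r.valid)
			printf("bus %d: id 0x%02x\n", i, r.id);
	}
	return 0;
}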
 
static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
u8 id)
{
struct atom_context *ctx = rdev->mode_info.atom_context;
271,7 → 257,9
 
memset(&hpd, 0, sizeof(struct radeon_hpd));
 
if (ASIC_IS_DCE4(rdev))
if (ASIC_IS_DCE6(rdev))
reg = SI_DC_GPIO_HPD_A;
else if (ASIC_IS_DCE4(rdev))
reg = EVERGREEN_DC_GPIO_HPD_A;
else
reg = AVIVO_DC_GPIO_HPD_A;
456,10 → 444,26
*/
if ((dev->pdev->device == 0x9498) &&
(dev->pdev->subsystem_vendor == 0x1682) &&
(dev->pdev->subsystem_device == 0x2452)) {
(dev->pdev->subsystem_device == 0x2452) &&
(i2c_bus->valid == false) &&
!(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) {
struct radeon_device *rdev = dev->dev_private;
*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
}
 
/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) &&
(dev->pdev->subsystem_vendor == 0x1734) &&
(dev->pdev->subsystem_device == 0x11bd)) {
if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
*line_mux = 0x3103;
} else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
*connector_type = DRM_MODE_CONNECTOR_DVII;
}
}
 
 
return true;
}
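
The added Fujitsu D3003-S2 entry follows the usual connector-quirk shape: match the PCI device plus subsystem vendor/device, then rewrite the connector type the BIOS reported. A hedged sketch of that matching logic (the quirk table below is illustrative, not the driver's actual list):

#include <stdint.h>
#include <stdio.h>

enum conn { CONN_VGA, CONN_DVID, CONN_DVII };

struct quirk {
	uint16_t device, sub_vendor, sub_device;
	enum conn from, to;
};

/* illustrative entries only, not the driver's real quirk list */
static const struct quirk quirks[] = {
	{ 0x9802, 0x1734, 0x11bd, CONN_VGA,  CONN_DVII },
	{ 0x9802, 0x1734, 0x11bd, CONN_DVID, CONN_DVII },
};

static enum conn apply_quirks(uint16_t dev, uint16_t sv, uint16_t sd,
			      enum conn type)
{
	unsigned i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		const struct quirk *q = &quirks[i];

		if (q->device == dev && q->sub_vendor == sv &&
		    q->sub_device == sd && q->from == type)
			return q->to;	/* board lied; report the real type */
	}
	return type;
}

int main(void)
{
	/* DVI-I wrongly listed as VGA comes back as DVI-I (2) */
	printf("%d\n", apply_quirks(0x9802, 0x1734, 0x11bd, CONN_VGA));
	return 0;
}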
 
1250,6 → 1254,10
if (rdev->clock.max_pixel_clock == 0)
rdev->clock.max_pixel_clock = 40000;
 
/* not technically a clock, but... */
rdev->mode_info.firmware_flags =
le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);
 
return true;
}
 
1259,6 → 1267,8
union igp_info {
struct _ATOM_INTEGRATED_SYSTEM_INFO info;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
};
 
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1386,7 → 1396,7
struct radeon_mode_info *mode_info = &rdev->mode_info;
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
u16 data_offset, size;
struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
union igp_info *igp_info;
u8 frev, crev;
u16 percentage = 0, rate = 0;
 
1393,22 → 1403,45
/* get any igp specific overrides */
if (atom_parse_data_header(mode_info->atom_context, index, &size,
&frev, &crev, &data_offset)) {
igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
igp_info = (union igp_info *)
(mode_info->atom_context->bios + data_offset);
switch (crev) {
case 6:
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->usDVISSPercentage);
rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
break;
}
break;
case 7:
switch (id) {
case ASIC_INTERNAL_SS_ON_TMDS:
percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_HDMI:
percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
break;
case ASIC_INTERNAL_SS_ON_LVDS:
percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
break;
}
break;
default:
DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
break;
}
if (percentage)
ss->percentage = percentage;
if (rate)
1892,6 → 1925,8
"emc2103",
"Sumo",
"Northern Islands",
"Southern Islands",
"lm96163",
};
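
Both thermal-controller changes in this revision are the same defensive fix: the BIOS-supplied type byte is now range-checked against the name table before it is used as an index, and out-of-range values get their own log path. A small sketch of the idiom, with ARRAY_SIZE spelled out for a standalone build:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* shortened stand-in for the driver's name table */
static const char *names[] = { "NONE", "lm63", "adm1032" };

static const char *name_for(unsigned type)
{
	/* never trust a table index that came from the BIOS */
	if (type < ARRAY_SIZE(names))
		return names[type];
	return "unknown";
}

int main(void)
{
	printf("%s %s\n", name_for(1), name_for(200));
	return 0;
}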
 
union power_info {
1908,6 → 1943,7
struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};
 
union pplib_power_state {
1973,7 → 2009,8
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
/* add the i2c bus for thermal/fan chip */
if (power_info->info.ucOverdriveThermalController > 0) {
if ((power_info->info.ucOverdriveThermalController > 0) &&
(power_info->info.ucOverdriveThermalController < ARRAY_SIZE(thermal_controller_names))) {
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
thermal_controller_names[power_info->info.ucOverdriveThermalController],
power_info->info.ucOverdriveControllerAddress >> 1);
1996,10 → 2033,14
return state_index;
/* last mode is usually default, array is low to high */
for (i = 0; i < num_modes; i++) {
rdev->pm.power_state[state_index].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
if (!rdev->pm.power_state[state_index].clock_info)
return state_index;
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
switch (frev) {
case 1:
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
2035,7 → 2076,6
state_index++;
break;
case 2:
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
2072,7 → 2112,6
state_index++;
break;
case 3:
rdev->pm.power_state[state_index].num_clock_modes = 1;
rdev->pm.power_state[state_index].clock_info[0].mclk =
le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
rdev->pm.power_state[state_index].clock_info[0].sclk =
2163,6 → 2202,11
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_NI;
} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
DRM_INFO("Internal thermal controller %s fan control\n",
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
rdev->pm.int_thermal_type = THERMAL_TYPE_SI;
} else if ((controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) ||
(controller->ucType ==
2170,7 → 2214,7
(controller->ucType ==
ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) {
DRM_INFO("Special thermal controller config\n");
} else {
} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
pp_lib_thermal_controller_names[controller->ucType],
controller->ucI2cAddress >> 1,
2185,6 → 2229,12
strlcpy(info.type, name, sizeof(info.type));
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
} else {
DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
controller->ucType,
controller->ucI2cAddress >> 1,
(controller->ucFanParameters &
ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
}
}
}
2257,7 → 2307,7
rdev->pm.default_power_state_index = state_index;
rdev->pm.power_state[state_index].default_clock_mode =
&rdev->pm.power_state[state_index].clock_info[mode_index - 1];
if (ASIC_IS_DCE5(rdev)) {
if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
/* NI chips post without MC ucode, so default clocks are strobe mode only */
rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
2283,6 → 2333,7
union pplib_clock_info *clock_info)
{
u32 sclk, mclk;
u16 vddc;
 
if (rdev->flags & RADEON_IS_IGP) {
if (rdev->family >= CHIP_PALM) {
2294,6 → 2345,19
sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
}
} else if (ASIC_IS_DCE6(rdev)) {
sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
sclk |= clock_info->si.ucEngineClockHigh << 16;
mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
mclk |= clock_info->si.ucMemoryClockHigh << 16;
rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
VOLTAGE_SW;
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
le16_to_cpu(clock_info->si.usVDDC);
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
le16_to_cpu(clock_info->si.usVDDCI);
} else if (ASIC_IS_DCE4(rdev)) {
sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
2321,11 → 2385,18
}
 
/* patch up vddc if necessary */
if (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
u16 vddc;
 
if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
switch (rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage) {
case ATOM_VIRTUAL_VOLTAGE_ID0:
case ATOM_VIRTUAL_VOLTAGE_ID1:
case ATOM_VIRTUAL_VOLTAGE_ID2:
case ATOM_VIRTUAL_VOLTAGE_ID3:
if (radeon_atom_get_max_vddc(rdev, VOLTAGE_TYPE_VDDC,
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage,
&vddc) == 0)
rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
break;
default:
break;
}
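
The vddc patch-up above no longer keys on the single magic value 0xff01: any of the four virtual voltage IDs is resolved to a real level by querying the SetVoltage table, and anything else is left alone. A simplified sketch of that control flow (the ID values and the query function are stand-ins):

#include <stdint.h>
#include <stdio.h>

/* stand-in values; the real IDs come from the AtomBIOS headers */
#define VIRTUAL_ID0 0xff01
#define VIRTUAL_ID1 0xff02
#define VIRTUAL_ID2 0xff03
#define VIRTUAL_ID3 0xff04

/* pretend query into the SetVoltage table */
static int get_max_vddc(uint16_t id, uint16_t *out)
{
	(void)id;
	*out = 1100;	/* fake level in mV */
	return 0;
}

static uint16_t patch_voltage(uint16_t v)
{
	uint16_t real;

	switch (v) {
	case VIRTUAL_ID0:
	case VIRTUAL_ID1:
	case VIRTUAL_ID2:
	case VIRTUAL_ID3:
		if (get_max_vddc(v, &real) == 0)
			return real;	/* replace the placeholder */
		return v;		/* query failed: keep it */
	default:
		return v;		/* already a real level */
	}
}

int main(void)
{
	printf("%u %u\n", (unsigned)patch_voltage(VIRTUAL_ID1),
	       (unsigned)patch_voltage(900));
	return 0;
}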
 
if (rdev->flags & RADEON_IS_IGP) {
2377,6 → 2448,13
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
(power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
((power_info->pplib.ucStateEntrySize - 1) ?
(power_info->pplib.ucStateEntrySize - 1) : 1),
GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info)
return state_index;
if (power_info->pplib.ucStateEntrySize - 1) {
for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
clock_info = (union pplib_clock_info *)
(mode_info->atom_context->bios + data_offset +
2389,6 → 2467,13
if (valid)
mode_index++;
}
} else {
rdev->pm.power_state[state_index].clock_info[0].mclk =
rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk =
rdev->clock.default_sclk;
mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
2421,9 → 2506,9
int i, j, non_clock_array_index, clock_array_index;
int state_index = 0, mode_index = 0;
union pplib_clock_info *clock_info;
struct StateArray *state_array;
struct ClockInfoArray *clock_info_array;
struct NonClockInfoArray *non_clock_info_array;
struct _StateArray *state_array;
struct _ClockInfoArray *clock_info_array;
struct _NonClockInfoArray *non_clock_info_array;
bool valid;
union power_info *power_info;
int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2436,13 → 2521,13
power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
 
radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
state_array = (struct StateArray *)
state_array = (struct _StateArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usStateArrayOffset));
clock_info_array = (struct ClockInfoArray *)
clock_info_array = (struct _ClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
non_clock_info_array = (struct NonClockInfoArray *)
non_clock_info_array = (struct _NonClockInfoArray *)
(mode_info->atom_context->bios + data_offset +
le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2456,6 → 2541,13
non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
&non_clock_info_array->nonClockInfo[non_clock_array_index];
rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
(power_state->v2.ucNumDPMLevels ?
power_state->v2.ucNumDPMLevels : 1),
GFP_KERNEL);
if (!rdev->pm.power_state[i].clock_info)
return state_index;
if (power_state->v2.ucNumDPMLevels) {
for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
clock_array_index = power_state->v2.clockInfoIndex[j];
/* XXX this might be an inagua bug... */
2462,7 → 2554,7
if (clock_array_index >= clock_info_array->ucNumEntries)
continue;
clock_info = (union pplib_clock_info *)
&clock_info_array->clockInfo[clock_array_index];
&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
valid = radeon_atombios_parse_pplib_clock_info(rdev,
state_index, mode_index,
clock_info);
2469,6 → 2561,13
if (valid)
mode_index++;
}
} else {
rdev->pm.power_state[state_index].clock_info[0].mclk =
rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk =
rdev->clock.default_sclk;
mode_index++;
}
rdev->pm.power_state[state_index].num_clock_modes = mode_index;
if (mode_index) {
radeon_atombios_parse_pplib_non_clock_info(rdev, state_index, mode_index,
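
The small-looking change to the clockInfo index above is the important one in this hunk: entries in ClockInfoArray are variable-sized records whose size is given by ucEntrySize, so element access has to be a byte offset (index * entry size) rather than C array indexing on a fixed struct. A standalone sketch of the pattern, with a made-up record layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* a packed table whose header gives the per-entry size */
struct table {
	uint8_t num_entries;
	uint8_t entry_size;	/* may be larger than any struct we know */
	uint8_t data[64];	/* raw records laid out back to back */
};

static uint16_t entry_sclk(const struct table *t, int i)
{
	uint16_t sclk;

	/* index by bytes, then read only the prefix we understand */
	memcpy(&sclk, &t->data[i * t->entry_size], sizeof(sclk));
	return sclk;
}

int main(void)
{
	struct table t = { 2, 4, { 0 } };	/* 4-byte records */
	uint16_t a = 300, b = 600;

	memcpy(&t.data[0], &a, sizeof(a));		/* entry 0 */
	memcpy(&t.data[t.entry_size], &b, sizeof(b));	/* entry 1 */

	/* indexing data[i] directly would land inside entry 0 for i = 1 */
	printf("%u %u\n", (unsigned)entry_sclk(&t, 0),
	       (unsigned)entry_sclk(&t, 1));
	return 0;
}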
2524,6 → 2623,9
} else {
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
if (rdev->pm.power_state) {
rdev->pm.power_state[0].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
if (rdev->pm.power_state[0].clock_info) {
/* add the default mode */
rdev->pm.power_state[state_index].type =
POWER_STATE_TYPE_DEFAULT;
2539,12 → 2641,17
state_index++;
}
}
}
 
rdev->pm.num_power_states = state_index;
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
if (rdev->pm.default_power_state_index >= 0)
rdev->pm.current_vddc =
rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage;
else
rdev->pm.current_vddc = 0;
}
 
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
2604,6 → 2711,7
struct _SET_VOLTAGE_PS_ALLOCATION alloc;
struct _SET_VOLTAGE_PARAMETERS v1;
struct _SET_VOLTAGE_PARAMETERS_V2 v2;
struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};
 
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type)
2630,6 → 2738,11
args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
break;
case 3:
args.v3.ucVoltageType = voltage_type;
args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return;
2638,8 → 2751,8
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
int radeon_atom_get_max_vddc(struct radeon_device *rdev,
u16 *voltage)
static int radeon_atom_get_max_vddc(struct radeon_device *rdev, u8 voltage_type,
u16 voltage_id, u16 *voltage)
{
union set_voltage args;
int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
2660,6 → 2773,15
 
*voltage = le16_to_cpu(args.v2.usVoltageLevel);
break;
case 3:
args.v3.ucVoltageType = voltage_type;
args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
args.v3.usVoltageLevel = cpu_to_le16(voltage_id);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
*voltage = le16_to_cpu(args.v3.usVoltageLevel);
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
return -EINVAL;
2911,6 → 3033,20
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
}
}
if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
(radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
if (connected) {
DRM_DEBUG_KMS("DFP6 connected\n");
bios_0_scratch |= ATOM_S0_DFP6;
bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
} else {
DRM_DEBUG_KMS("DFP6 disconnected\n");
bios_0_scratch &= ~ATOM_S0_DFP6;
bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
}
}
 
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
2931,6 → 3067,9
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t bios_3_scratch;
 
if (ASIC_IS_DCE4(rdev))
return;
 
if (rdev->family >= CHIP_R600)
bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
else
2983,6 → 3122,9
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t bios_2_scratch;
 
if (ASIC_IS_DCE4(rdev))
return;
 
if (rdev->family >= CHIP_R600)
bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
else
/drivers/video/drm/radeon/radeon_benchmark.c
26,33 → 26,81
#include "radeon_reg.h"
#include "radeon.h"
 
unsigned int inline jiffies_to_msecs(const unsigned long j)
#define RADEON_BENCHMARK_COPY_BLIT 1
#define RADEON_BENCHMARK_COPY_DMA 0
 
#define RADEON_BENCHMARK_ITERATIONS 1024
#define RADEON_BENCHMARK_COMMON_MODES_N 17
 
static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
uint64_t saddr, uint64_t daddr,
int flag, int n)
{
return (10 * j);
};
unsigned long start_jiffies;
unsigned long end_jiffies;
struct radeon_fence *fence = NULL;
int i, r;
 
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
switch (flag) {
case RADEON_BENCHMARK_COPY_DMA:
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
&fence);
break;
case RADEON_BENCHMARK_COPY_BLIT:
r = radeon_copy_blit(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE,
&fence);
break;
default:
DRM_ERROR("Unknown copy method\n");
r = -EINVAL;
}
if (r)
goto exit_do_move;
r = radeon_fence_wait(fence, false);
if (r)
goto exit_do_move;
radeon_fence_unref(&fence);
}
end_jiffies = GetTimerTicks();
r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
exit_do_move:
if (fence)
radeon_fence_unref(&fence);
return r;
}
 
 
static void radeon_benchmark_log_results(int n, unsigned size,
unsigned int time,
unsigned sdomain, unsigned ddomain,
char *kind)
{
unsigned int throughput = (n * (size >> 10)) / time;
DRM_INFO("radeon: %s %u bo moves of %u kB from"
" %d to %d in %u ms, throughput: %u Mb/s or %u MB/s\n",
kind, n, size >> 10, sdomain, ddomain, time,
throughput * 8, throughput);
}
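
radeon_benchmark_do_move() returns elapsed milliseconds (or a negative error), and radeon_benchmark_log_results() converts that to kB moved per ms, which it reports as MB/s. A hedged userspace sketch of the same bookkeeping, with the GPU copy replaced by a stub and a monotonic clock standing in for jiffies:

#include <stdio.h>
#include <time.h>

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

static int do_copy(unsigned size) { (void)size; return 0; }	/* stub */

int main(void)
{
	const int n = 1024;			/* moves, as in the driver */
	const unsigned size = 1024 * 1024;	/* bytes per move */
	long start = now_ms(), ms;
	int i;

	for (i = 0; i < n; i++)
		if (do_copy(size))
			return 1;	/* bail out like the driver does */

	ms = now_ms() - start;
	if (ms > 0) {
		/* (moves * kB) / ms is kB per ms, i.e. roughly MB/s */
		unsigned long mbs = ((unsigned long)n * (size >> 10)) / ms;

		printf("%d moves of %u kB in %ld ms: %lu MB/s\n",
		       n, size >> 10, ms, mbs);
	}
	return 0;
}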
 
static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
unsigned sdomain, unsigned ddomain)
{
struct radeon_bo *dobj = NULL;
struct radeon_bo *sobj = NULL;
struct radeon_fence *fence = NULL;
uint64_t saddr, daddr;
unsigned long start_jiffies;
unsigned long end_jiffies;
unsigned long time;
unsigned i, n, size;
int r;
int r, n;
int time;
 
 
ENTER();
 
size = bsize;
n = 4; //1024;
 
dbgprintf("source domain %x\n", sdomain);
 
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
n = RADEON_BENCHMARK_ITERATIONS;
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, NULL, &sobj);
if (r) {
goto out_cleanup;
}
64,10 → 112,7
if (r) {
goto out_cleanup;
}
 
dbgprintf("destination domain %x\n", ddomain);
 
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, &dobj);
r = radeon_bo_create(rdev, size, PAGE_SIZE, true, ddomain, NULL, &dobj);
if (r) {
goto out_cleanup;
}
82,75 → 127,27
dbgprintf("done\n");
 
/* r100 doesn't have dma engine so skip the test */
if (rdev->asic->copy_dma) {
 
dbgprintf("copy dma\n");
 
start_jiffies = GetTimerTicks();
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
if (r) {
/* also, VRAM-to-VRAM test doesn't make much sense for DMA */
/* skip it as well if domains are the same */
if ((rdev->asic->copy.dma) && (sdomain != ddomain)) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_DMA, n);
if (time < 0)
goto out_cleanup;
if (time > 0)
radeon_benchmark_log_results(n, size, time,
sdomain, ddomain, "dma");
}
 
r = radeon_copy_dma(rdev, saddr, daddr,
size / RADEON_GPU_PAGE_SIZE, fence);
 
if (r) {
time = radeon_benchmark_do_move(rdev, size, saddr, daddr,
RADEON_BENCHMARK_COPY_BLIT, n);
if (time < 0)
goto out_cleanup;
}
}
if (time > 0)
radeon_benchmark_log_results(n, size, time,
sdomain, ddomain, "blit");
 
r = radeon_fence_wait(fence, false);
if (r) {
goto out_cleanup;
}
radeon_fence_unref(&fence);
 
end_jiffies = GetTimerTicks();
time = end_jiffies - start_jiffies;
time = jiffies_to_msecs(time);
if (time > 0) {
i = ((n * size) >> 10) / time;
printk(KERN_INFO "radeon: dma %u bo moves of %ukb from"
" %d to %d in %lums (%ukb/ms %ukb/s %uM/s)\n",
n, size >> 10,
sdomain, ddomain, time,
i, i * 1000, (i * 1000) / 1024);
}
}
 
start_jiffies = GetTimerTicks();
for (i = 0; i < n; i++) {
r = radeon_fence_create(rdev, &fence);
if (r) {
goto out_cleanup;
}
r = radeon_copy_blit(rdev, saddr, daddr, size / RADEON_GPU_PAGE_SIZE, fence);
if (r) {
goto out_cleanup;
}
}
 
r = radeon_fence_wait(fence, false);
if (r) {
goto out_cleanup;
}
radeon_fence_unref(&fence);
 
end_jiffies = GetTimerTicks();
time = end_jiffies - start_jiffies;
time = jiffies_to_msecs(time);
if (time > 0) {
i = ((n * size) >> 10) / time;
printk(KERN_INFO "radeon: blit %u bo moves of %ukb from %d to %d"
" in %lums (%ukb/ms %ukb/s %uM/s)\n", n, size >> 10,
sdomain, ddomain, time, i, i * 1000, (i * 1000) / 1024);
}
out_cleanup:
 
dbgprintf("cleanup\n");
 
if (sobj) {
r = radeon_bo_reserve(sobj, false);
if (likely(r == 0)) {
167,11 → 164,9
}
radeon_bo_unref(&dobj);
}
if (fence) {
radeon_fence_unref(&fence);
}
 
if (r) {
printk(KERN_WARNING "Error while benchmarking BO move.\n");
DRM_ERROR("Error while benchmarking BO move.\n");
}
 
LEAVE();
178,12 → 173,86
 
}
 
void radeon_benchmark(struct radeon_device *rdev)
void radeon_benchmark(struct radeon_device *rdev, int test_number)
{
radeon_benchmark_move(rdev, 4096*4096, RADEON_GEM_DOMAIN_GTT,
int i;
int common_modes[RADEON_BENCHMARK_COMMON_MODES_N] = {
640 * 480 * 4,
720 * 480 * 4,
800 * 600 * 4,
848 * 480 * 4,
1024 * 768 * 4,
1152 * 768 * 4,
1280 * 720 * 4,
1280 * 800 * 4,
1280 * 854 * 4,
1280 * 960 * 4,
1280 * 1024 * 4,
1440 * 900 * 4,
1400 * 1050 * 4,
1680 * 1050 * 4,
1600 * 1200 * 4,
1920 * 1080 * 4,
1920 * 1200 * 4
};
 
switch (test_number) {
case 1:
/* simple test, VRAM to GTT and GTT to VRAM */
radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
radeon_benchmark_move(rdev, 4096*4096, RADEON_GEM_DOMAIN_VRAM,
radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
radeon_benchmark_move(rdev, 4096*4096, RADEON_GEM_DOMAIN_VRAM,
break;
case 2:
/* simple test, VRAM to VRAM */
radeon_benchmark_move(rdev, 1024*1024, RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
break;
case 3:
/* GTT to VRAM, buffer size sweep, powers of 2 */
for (i = 1; i <= 16384; i <<= 1)
radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
break;
case 4:
/* VRAM to GTT, buffer size sweep, powers of 2 */
for (i = 1; i <= 16384; i <<= 1)
radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 5:
/* VRAM to VRAM, buffer size sweep, powers of 2 */
for (i = 1; i <= 16384; i <<= 1)
radeon_benchmark_move(rdev, i * RADEON_GPU_PAGE_SIZE,
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
break;
case 6:
/* GTT to VRAM, buffer size sweep, common modes */
for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
break;
case 7:
/* VRAM to GTT, buffer size sweep, common modes */
for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 8:
/* VRAM to VRAM, buffer size sweep, common modes */
for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
break;
 
default:
DRM_ERROR("Unknown benchmark\n");
}
}
/drivers/video/drm/radeon/radeon_bios.c
25,7 → 25,7
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
99,18 → 99,83
return true;
}
 
#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
*/
/* retrieve the ROM in 4k blocks */
#define ATRM_BIOS_PAGE 4096
/**
* radeon_atrm_call - fetch a chunk of the vbios
*
* @atrm_handle: acpi ATRM handle
* @bios: vbios image pointer
* @offset: offset of vbios image data to fetch
* @len: length of vbios image data to fetch
*
* Executes ATRM to fetch a chunk of the discrete
* vbios image on PX systems (all asics).
* Returns the length of the buffer fetched.
*/
static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
int offset, int len)
{
acpi_status status;
union acpi_object atrm_arg_elements[2], *obj;
struct acpi_object_list atrm_arg;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
 
atrm_arg.count = 2;
atrm_arg.pointer = &atrm_arg_elements[0];
 
atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[0].integer.value = offset;
 
atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
atrm_arg_elements[1].integer.value = len;
 
status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
if (ACPI_FAILURE(status)) {
printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
return -ENODEV;
}
 
obj = (union acpi_object *)buffer.pointer;
memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
len = obj->buffer.length;
kfree(buffer.pointer);
return len;
}
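
radeon_atrm_get_bios() below drives this call in a loop, one 4 KiB page at a time, and the revision also tightens the termination test: stop on any short read, not just on error, since a short page marks the end of the image. A minimal sketch of that chunked-fetch loop against a stubbed source:

#include <stdio.h>
#include <string.h>

#define PAGE	4096
#define ROM_LEN	10000	/* pretend vbios length, not page aligned */

/* stand-in for the ACPI call: copies up to len bytes at offset */
static int fetch(unsigned char *dst, int offset, int len)
{
	static unsigned char rom[ROM_LEN];

	if (offset >= ROM_LEN)
		return 0;
	if (offset + len > ROM_LEN)
		len = ROM_LEN - offset;
	memcpy(dst + offset, rom + offset, len);
	return len;	/* short on the last page */
}

int main(void)
{
	static unsigned char bios[256 * 1024];
	int i, got = 0;

	for (i = 0; i < (int)(sizeof(bios) / PAGE); i++) {
		int ret = fetch(bios, i * PAGE, PAGE);

		got += ret;
		if (ret < PAGE)	/* a short read marks the image end */
			break;
	}
	printf("fetched %d bytes\n", got);
	return 0;
}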
 
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
int ret;
int size = 256 * 1024;
int i;
struct pci_dev *pdev = NULL;
acpi_handle dhandle, atrm_handle;
acpi_status status;
bool found = false;
 
if (!radeon_atrm_supported(rdev->pdev))
/* ATRM is for the discrete card only */
if (rdev->flags & RADEON_IS_IGP)
return false;
 
while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
continue;
 
status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
if (!ACPI_FAILURE(status)) {
found = true;
break;
}
}
 
if (!found)
return false;
 
rdev->bios = kmalloc(size, GFP_KERNEL);
if (!rdev->bios) {
DRM_ERROR("Unable to allocate bios\n");
118,10 → 183,11
}
 
for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
ret = radeon_atrm_get_bios_chunk(rdev->bios,
ret = radeon_atrm_call(atrm_handle,
rdev->bios,
(i * ATRM_BIOS_PAGE),
ATRM_BIOS_PAGE);
if (ret <= 0)
if (ret < ATRM_BIOS_PAGE)
break;
}
 
131,6 → 197,12
}
return true;
}
#else
static bool radeon_atrm_get_bios(struct radeon_device *rdev)
{
return false;
}
#endif
 
static bool ni_read_disabled_bios(struct radeon_device *rdev)
{
477,7 → 549,62
return legacy_read_disabled_bios(rdev);
}
 
#ifdef CONFIG_ACPI
static bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
{
bool ret = false;
struct acpi_table_header *hdr;
acpi_size tbl_size;
UEFI_ACPI_VFCT *vfct;
GOP_VBIOS_CONTENT *vbios;
VFCT_IMAGE_HEADER *vhdr;
 
if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size)))
return false;
if (tbl_size < sizeof(UEFI_ACPI_VFCT)) {
DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n");
goto out_unmap;
}
 
vfct = (UEFI_ACPI_VFCT *)hdr;
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) {
DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n");
goto out_unmap;
}
 
vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset);
vhdr = &vbios->VbiosHeader;
DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n",
vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction,
vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength);
 
if (vhdr->PCIBus != rdev->pdev->bus->number ||
vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) ||
vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) ||
vhdr->VendorID != rdev->pdev->vendor ||
vhdr->DeviceID != rdev->pdev->device) {
DRM_INFO("ACPI VFCT table is not for this card\n");
goto out_unmap;
};
 
if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) {
DRM_ERROR("ACPI VFCT image truncated\n");
goto out_unmap;
}
 
rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL);
ret = !!rdev->bios;
 
out_unmap:
return ret;
}
#else
static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev)
{
return false;
}
#endif
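
The VFCT parser above is a textbook case of validating an untrusted table: check that the header fits, that the image header at its claimed offset fits, and that offset plus headers plus image length fit, all before copying anything. A condensed sketch of those three checks (layout simplified to two 32-bit fields; the subtraction form of the comparisons also avoids integer overflow):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* table layout simplified: u32 image offset, then u32 image length */
static int validate(const uint8_t *tbl, size_t tbl_size)
{
	uint32_t off, len;

	if (tbl_size < 4)
		return -1;			/* too short #1 */
	memcpy(&off, tbl, 4);			/* claimed image offset */

	if (off > tbl_size || tbl_size - off < 4)
		return -1;			/* too short #2 */
	memcpy(&len, tbl + off, 4);		/* claimed image length */

	if (tbl_size - off - 4 < len)
		return -1;			/* image truncated */
	return 0;				/* safe to copy */
}

int main(void)
{
	uint8_t buf[64] = { 0 };
	uint32_t off = 4, len = 8;

	memcpy(buf, &off, sizeof(off));
	memcpy(buf + 4, &len, sizeof(len));
	printf("%s\n", validate(buf, sizeof(buf)) ? "bad" : "ok");
	return 0;
}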
 
bool radeon_get_bios(struct radeon_device *rdev)
{
bool r;
485,6 → 612,8
 
r = radeon_atrm_get_bios(rdev);
if (r == false)
r = radeon_acpi_vfct_bios(rdev);
if (r == false)
r = igp_read_bios_from_vram(rdev);
if (r == false)
r = radeon_read_bios(rdev);
/drivers/video/drm/radeon/radeon_blit_common.h
0,0 → 1,44
/*
* Copyright 2009 Advanced Micro Devices, Inc.
* Copyright 2009 Red Hat Inc.
* Copyright 2012 Alcatel-Lucent, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef __RADEON_BLIT_COMMON_H__
 
#define DI_PT_RECTLIST 0x11
#define DI_INDEX_SIZE_16_BIT 0x0
#define DI_SRC_SEL_AUTO_INDEX 0x2
 
#define FMT_8 0x1
#define FMT_5_6_5 0x8
#define FMT_8_8_8_8 0x1a
#define COLOR_8 0x1
#define COLOR_5_6_5 0x8
#define COLOR_8_8_8_8 0x1a
 
#define RECT_UNIT_H 32
#define RECT_UNIT_W (RADEON_GPU_PAGE_SIZE / 4 / RECT_UNIT_H)
 
#define __RADEON_BLIT_COMMON_H__
#endif
/drivers/video/drm/radeon/radeon_clocks.c
25,8 → 25,8
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
219,6 → 219,9
} else {
DRM_INFO("Using generic clock info\n");
 
/* may need to be per card */
rdev->clock.max_pixel_clock = 35000;
 
if (rdev->flags & RADEON_IS_IGP) {
p1pll->reference_freq = 1432;
p2pll->reference_freq = 1432;
331,7 → 334,7
 
if (!rdev->clock.default_sclk)
rdev->clock.default_sclk = radeon_get_engine_clock(rdev);
if ((!rdev->clock.default_mclk) && rdev->asic->get_memory_clock)
if ((!rdev->clock.default_mclk) && rdev->asic->pm.get_memory_clock)
rdev->clock.default_mclk = radeon_get_memory_clock(rdev);
 
rdev->pm.current_sclk = rdev->clock.default_sclk;
630,7 → 633,7
tmp &= ~(R300_SCLK_FORCE_VAP);
tmp |= RADEON_SCLK_FORCE_CP;
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
udelay(15000);
mdelay(15);
 
tmp = RREG32_PLL(R300_SCLK_CNTL2);
tmp &= ~(R300_SCLK_FORCE_TCL |
648,12 → 651,12
tmp |= (RADEON_ENGIN_DYNCLK_MODE |
(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
udelay(15000);
mdelay(15);
 
tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
tmp |= RADEON_SCLK_DYN_START_CNTL;
WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
udelay(15000);
mdelay(15);
 
/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
to lock up randomly; leave them as set by the BIOS.
693,7 → 696,7
tmp |= RADEON_SCLK_MORE_FORCEON;
}
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
udelay(15000);
mdelay(15);
}
 
/* RV200::A11 A12, RV250::A11 A12 */
706,7 → 709,7
tmp |= RADEON_TCL_BYPASS_DISABLE;
WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
}
udelay(15000);
mdelay(15);
 
/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
719,7 → 722,7
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
udelay(15000);
mdelay(15);
 
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
726,7 → 729,7
RADEON_PIXCLK_DAC_ALWAYS_ONb);
 
WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
udelay(15000);
mdelay(15);
}
} else {
/* Turn everything OFF (ForceON to everything) */
858,7 → 861,7
}
WREG32_PLL(RADEON_SCLK_CNTL, tmp);
 
udelay(16000);
mdelay(16);
 
if ((rdev->family == CHIP_R300) ||
(rdev->family == CHIP_R350)) {
867,7 → 870,7
R300_SCLK_FORCE_GA |
R300_SCLK_FORCE_CBA);
WREG32_PLL(R300_SCLK_CNTL2, tmp);
udelay(16000);
mdelay(16);
}
 
if (rdev->flags & RADEON_IS_IGP) {
875,7 → 878,7
tmp &= ~(RADEON_FORCEON_MCLKA |
RADEON_FORCEON_YCLKA);
WREG32_PLL(RADEON_MCLK_CNTL, tmp);
udelay(16000);
mdelay(16);
}
 
if ((rdev->family == CHIP_RV200) ||
884,7 → 887,7
tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
tmp |= RADEON_SCLK_MORE_FORCEON;
WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
udelay(16000);
mdelay(16);
}
 
tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
897,7 → 900,7
RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
udelay(16000);
mdelay(16);
 
tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
/drivers/video/drm/radeon/radeon_combios.c
24,8 → 24,8
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
 
620,8 → 620,8
i2c.y_data_mask = 0x80;
} else {
/* default masks for ddc pads */
i2c.mask_clk_mask = RADEON_GPIO_EN_1;
i2c.mask_data_mask = RADEON_GPIO_EN_0;
i2c.mask_clk_mask = RADEON_GPIO_MASK_1;
i2c.mask_data_mask = RADEON_GPIO_MASK_0;
i2c.a_clk_mask = RADEON_GPIO_A_1;
i2c.a_data_mask = RADEON_GPIO_A_0;
i2c.en_clk_mask = RADEON_GPIO_EN_1;
719,6 → 719,34
return i2c;
}
 
static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct radeon_i2c_bus_rec i2c;
u16 offset;
u8 id, blocks, clk, data;
int i;
 
i2c.valid = false;
 
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
if (offset) {
blocks = RBIOS8(offset + 2);
for (i = 0; i < blocks; i++) {
id = RBIOS8(offset + 3 + (i * 5) + 0);
if (id == 136) {
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
/* gpiopad */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
(1 << clk), (1 << data));
break;
}
}
}
return i2c;
}
 
void radeon_combios_i2c_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
755,30 → 783,14
} else if (rdev->family == CHIP_RS300 ||
rdev->family == CHIP_RS400 ||
rdev->family == CHIP_RS480) {
u16 offset;
u8 id, blocks, clk, data;
int i;
 
/* 0x68 */
i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0);
rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID");
 
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
if (offset) {
blocks = RBIOS8(offset + 2);
for (i = 0; i < blocks; i++) {
id = RBIOS8(offset + 3 + (i * 5) + 0);
if (id == 136) {
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
/* gpiopad */
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
(1 << clk), (1 << data));
i2c = radeon_combios_get_i2c_info_from_table(rdev);
if (i2c.valid)
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
}
}
} else if ((rdev->family == CHIP_R200) ||
(rdev->family >= CHIP_R300)) {
/* 0x68 */
1561,6 → 1573,11
(rdev->pdev->subsystem_device == 0x4150)) {
/* Mac G5 tower 9600 */
rdev->mode_info.connector_table = CT_MAC_G5_9600;
} else if ((rdev->pdev->device == 0x4c66) &&
(rdev->pdev->subsystem_vendor == 0x1002) &&
(rdev->pdev->subsystem_device == 0x4c66)) {
/* SAM440ep RV250 embedded board */
rdev->mode_info.connector_table = CT_SAM440EP;
} else
#endif /* CONFIG_PPC_PMAC */
#ifdef CONFIG_PPC64
2134,6 → 2151,67
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
case CT_SAM440EP:
DRM_INFO("Connector Table: %d (SAM440ep embedded board)\n",
rdev->mode_info.connector_table);
/* LVDS */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_NONE_DETECTED, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_LCD1_SUPPORT,
0),
ATOM_DEVICE_LCD1_SUPPORT);
radeon_add_legacy_connector(dev, 0, ATOM_DEVICE_LCD1_SUPPORT,
DRM_MODE_CONNECTOR_LVDS, &ddc_i2c,
CONNECTOR_OBJECT_ID_LVDS,
&hpd);
/* DVI-I - secondary dac, int tmds */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
hpd.hpd = RADEON_HPD_1; /* ??? */
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_DFP1_SUPPORT,
0),
ATOM_DEVICE_DFP1_SUPPORT);
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT2_SUPPORT,
2),
ATOM_DEVICE_CRT2_SUPPORT);
radeon_add_legacy_connector(dev, 1,
ATOM_DEVICE_DFP1_SUPPORT |
ATOM_DEVICE_CRT2_SUPPORT,
DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
&hpd);
/* VGA - primary dac */
ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_CRT1_SUPPORT,
1),
ATOM_DEVICE_CRT1_SUPPORT);
radeon_add_legacy_connector(dev, 2,
ATOM_DEVICE_CRT1_SUPPORT,
DRM_MODE_CONNECTOR_VGA, &ddc_i2c,
CONNECTOR_OBJECT_ID_VGA,
&hpd);
/* TV - TV DAC */
ddc_i2c.valid = false;
hpd.hpd = RADEON_HPD_NONE;
radeon_add_legacy_encoder(dev,
radeon_get_encoder_enum(dev,
ATOM_DEVICE_TV1_SUPPORT,
2),
ATOM_DEVICE_TV1_SUPPORT);
radeon_add_legacy_connector(dev, 3, ATOM_DEVICE_TV1_SUPPORT,
DRM_MODE_CONNECTOR_SVIDEO,
&ddc_i2c,
CONNECTOR_OBJECT_ID_SVIDEO,
&hpd);
break;
default:
DRM_INFO("Connector table: %d (invalid)\n",
rdev->mode_info.connector_table);
2255,6 → 2333,9
connector = (tmp >> 12) & 0xf;
 
ddc_type = (tmp >> 8) & 0xf;
if (ddc_type == 5)
ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev);
else
ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0);
 
switch (connector) {
2563,15 → 2644,18
 
/* allocate 2 power states */
rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
if (!rdev->pm.power_state) {
rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = 0;
if (rdev->pm.power_state) {
/* allocate 1 clock mode per state */
rdev->pm.power_state[0].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
rdev->pm.power_state[1].clock_info =
kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
if (!rdev->pm.power_state[0].clock_info ||
!rdev->pm.power_state[1].clock_info)
goto pm_failed;
} else
goto pm_failed;
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
return;
}
 
/* check for a thermal chip */
offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
if (offset) {
2617,7 → 2701,26
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
}
}
} else {
/* boards with a thermal chip, but no overdrive table */
 
/* Asus 9600xt has an f75375 on the monid bus */
if ((dev->pdev->device == 0x4152) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
(dev->pdev->subsystem_device == 0xc002)) {
i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0);
rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
if (rdev->pm.i2c_bus) {
struct i2c_board_info info = { };
const char *name = "f75375";
info.addr = 0x28;
strlcpy(info.type, name, sizeof(info.type));
i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
DRM_INFO("Possible %s thermal controller at 0x%02x\n",
name, info.addr);
}
}
}
 
if (rdev->flags & RADEON_IS_MOBILITY) {
offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
2716,6 → 2819,14
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
return;
 
pm_failed:
rdev->pm.default_power_state_index = state_index;
rdev->pm.num_power_states = 0;
 
rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
rdev->pm.current_clock_mode_index = 0;
}
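
The reworked allocation path above uses the kernel's usual goto-cleanup shape: attempt both clock_info allocations up front and, on any failure, jump to a single pm_failed label that leaves power management in a harmless zero-state default. A toy sketch of that shape (error handling simplified; the real code also records a default state index):

#include <stdio.h>
#include <stdlib.h>

struct state { int *clock_info; };

static int setup(struct state s[2], int *num_states)
{
	s[0].clock_info = malloc(sizeof(int));
	s[1].clock_info = malloc(sizeof(int));
	if (!s[0].clock_info || !s[1].clock_info)
		goto failed;

	*num_states = 2;
	return 0;

failed:
	/* fall back to "no power management" rather than half a setup */
	free(s[0].clock_info);
	free(s[1].clock_info);
	s[0].clock_info = s[1].clock_info = NULL;
	*num_states = 0;
	return -1;
}

int main(void)
{
	struct state s[2] = { { 0 }, { 0 } };
	int n = 0;

	printf("setup: %d, states: %d\n", setup(s, &n), n);
	return 0;
}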
 
void radeon_external_tmds_setup(struct drm_encoder *encoder)
2815,7 → 2926,7
case 4:
val = RBIOS16(index);
index += 2;
udelay(val * 1000);
mdelay(val);
break;
case 6:
slave_addr = id & 0xff;
3014,7 → 3125,7
udelay(150);
break;
case 2:
udelay(1000);
mdelay(1);
break;
case 3:
while (tmp--) {
3045,13 → 3156,13
/*mclk_cntl |= 0x00001111;*//* ??? */
WREG32_PLL(RADEON_MCLK_CNTL,
mclk_cntl);
udelay(10000);
mdelay(10);
#endif
WREG32_PLL
(RADEON_CLK_PWRMGT_CNTL,
tmp &
~RADEON_CG_NO1_DEBUG_0);
udelay(10000);
mdelay(10);
}
break;
default:
3208,15 → 3319,6
WREG32(RADEON_CONFIG_MEMSIZE, mem_size);
}
 
void radeon_combios_dyn_clk_setup(struct drm_device *dev, int enable)
{
uint16_t dyn_clk_info =
combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
 
if (dyn_clk_info)
combios_parse_pll_table(dev, dyn_clk_info);
}
 
void radeon_combios_asic_init(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
3279,6 → 3381,14
rdev->pdev->subsystem_device == 0x30a4)
return;
 
/* quirk for rs4xx Compaq Presario V5245EU laptop to make it resume
* - it hangs on resume inside the dynclk 1 table.
*/
if (rdev->family == CHIP_RS480 &&
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x30ae)
return;
 
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
if (table)
/drivers/video/drm/radeon/radeon_connectors.c
23,11 → 23,11
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "drm_edid.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
 
40,12 → 40,6
struct drm_encoder *encoder,
bool connected);
 
extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
 
bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
 
void radeon_connector_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
60,20 → 54,41
 
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
/* powering up/down the eDP panel generates hpd events which
* can interfere with modesetting.
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
/* if the connector is already off, don't turn it back on */
if (connector->dpms != DRM_MODE_DPMS_ON)
return;
 
/* pre-r600 did not always have the hpd pins mapped accurately to connectors */
if (rdev->family >= CHIP_R600) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
/* just deal with DP (not eDP) here. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
/* if existing sink type was not DP no need to retrain */
if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
return;
 
/* first get sink type as it may be reset after (un)plug */
dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
/* don't do anything if sink is not display port, i.e.,
* passive dp->(dvi|hdmi) adaptor
*/
if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
int saved_dpms = connector->dpms;
/* Only turn off the display if it's physically disconnected */
if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
} else if (radeon_dp_needs_link_train(radeon_connector)) {
/* set it to OFF so that drm_helper_connector_dpms()
* won't return immediately since the current state
* is ON at this point.
*/
connector->dpms = DRM_MODE_DPMS_OFF;
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
connector->dpms = saved_dpms;
}
}
}
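
The retrain path above relies on a small but easy-to-miss trick: drm_helper_connector_dpms() returns early when the requested state equals connector->dpms, so the code forces the cached state to OFF before requesting ON, then restores the saved value. A stripped-down sketch of why that works (the helper below is a stand-in for the DRM one):

#include <stdio.h>

enum { DPMS_ON, DPMS_OFF };

struct connector { int dpms; };

/* stand-in with the same early-out the DRM helper has */
static void helper_dpms(struct connector *c, int mode)
{
	if (c->dpms == mode)
		return;		/* nothing to do, hardware untouched */
	c->dpms = mode;
	printf("hw programmed %s\n", mode == DPMS_ON ? "on" : "off");
}

int main(void)
{
	struct connector c = { DPMS_ON };
	int saved = c.dpms;

	/* link lost but cached state still ON: force the helper to act */
	c.dpms = DPMS_OFF;
	helper_dpms(&c, DPMS_ON);	/* now really re-enables the link */
	c.dpms = saved;
	return 0;
}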
 
static void radeon_property_change_mode(struct drm_encoder *encoder)
{
84,6 → 99,62
crtc->x, crtc->y, crtc->fb);
}
}
 
int radeon_get_monitor_bpc(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
int bpc = 8;
 
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
}
break;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
break;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) ||
drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
}
break;
case DRM_MODE_CONNECTOR_eDP:
case DRM_MODE_CONNECTOR_LVDS:
if (connector->display_info.bpc)
bpc = connector->display_info.bpc;
else if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
struct drm_connector_helper_funcs *connector_funcs =
connector->helper_private;
struct drm_encoder *encoder = connector_funcs->best_encoder(connector);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 
if (dig->lcd_misc & ATOM_PANEL_MISC_V13_6BIT_PER_COLOR)
bpc = 6;
else if (dig->lcd_misc & ATOM_PANEL_MISC_V13_8BIT_PER_COLOR)
bpc = 8;
}
break;
}
return bpc;
}
 
static void
radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status)
{
123,7 → 194,7
}
}
 
struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
144,7 → 215,7
return NULL;
}
 
struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct drm_mode_object *obj;
295,7 → 366,7
}
}
 
int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
static int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property,
uint64_t val)
{
struct drm_device *dev = connector->dev;
430,55 → 501,24
return 0;
}
 
/*
* Some integrated ATI Radeon chipset implementations (e. g.
* Asus M2A-VM HDMI) may indicate the availability of a DDC,
* even when there's no monitor connected. For these connectors
the following DDC probe extension will be applied: also check for the
availability of an EDID with at least a correct EDID header. Only then
is DDC assumed to be available. This prevents drm_get_edid() and
* drm_edid_block_valid() from periodically dumping data and kernel
* errors into the logs and onto the terminal.
*/
static bool radeon_connector_needs_extended_probe(struct radeon_device *dev,
uint32_t supported_device,
int connector_type)
{
/* Asus M2A-VM HDMI board sends data to the i2c bus even
* if HDMI add-on card is not plugged in or HDMI is disabled in
* BIOS. Valid DDC can only be assumed, if also a valid EDID header
* can be retrieved via i2c bus during DDC probe */
if ((dev->pdev->device == 0x791e) &&
(dev->pdev->subsystem_vendor == 0x1043) &&
(dev->pdev->subsystem_device == 0x826d)) {
if ((connector_type == DRM_MODE_CONNECTOR_HDMIA) &&
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return true;
}
/* ECS A740GM-M with ATI RADEON 2100 sends data to i2c bus
* for a DVI connector that is not implemented */
if ((dev->pdev->device == 0x796e) &&
(dev->pdev->subsystem_vendor == 0x1019) &&
(dev->pdev->subsystem_device == 0x2615)) {
if ((connector_type == DRM_MODE_CONNECTOR_DVID) &&
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return true;
}
 
/* Default: no EDID header probe required for DDC probing */
return false;
}
 
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder,
struct drm_connector *connector)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
struct drm_display_mode *t, *mode;
 
/* If the EDID preferred mode doesn't match the native mode, use it */
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
if (mode->hdisplay != native_mode->hdisplay ||
mode->vdisplay != native_mode->vdisplay)
memcpy(native_mode, mode, sizeof(*mode));
}
}
 
/* Try to get native mode details from EDID if necessary */
if (!native_mode->clock) {
struct drm_display_mode *t, *mode;
 
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) {
if (mode->hdisplay == native_mode->hdisplay &&
mode->vdisplay == native_mode->vdisplay) {
489,6 → 529,7
}
}
}
 
if (!native_mode->clock) {
DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
radeon_encoder->rmx_type = RMX_OFF;
605,7 → 646,7
if (radeon_connector->edid)
kfree(radeon_connector->edid);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
// drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
646,13 → 687,13
}
 
 
struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
static const struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = {
.get_modes = radeon_lvds_get_modes,
.mode_valid = radeon_lvds_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
 
struct drm_connector_funcs radeon_lvds_connector_funcs = {
static const struct drm_connector_funcs radeon_lvds_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_lvds_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
700,9 → 741,9
ret = connector_status_disconnected;
 
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector,
radeon_connector->requires_extended_probe);
dret = radeon_ddc_probe(radeon_connector);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
729,12 → 770,21
} else {
 
/* if we aren't forcing don't do destructive polling */
if (!force)
if (!force) {
/* only return the previous status if we last
* detected a monitor via load.
*/
if (radeon_connector->detected_by_load)
return connector->status;
else
return ret;
}
 
if (radeon_connector->dac_load_detect && encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
if (ret != connector_status_disconnected)
radeon_connector->detected_by_load = true;
}
}
 
755,13 → 805,13
return ret;
}
 
struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
static const struct drm_connector_helper_funcs radeon_vga_connector_helper_funcs = {
.get_modes = radeon_vga_get_modes,
.mode_valid = radeon_vga_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
 
struct drm_connector_funcs radeon_vga_connector_funcs = {
static const struct drm_connector_funcs radeon_vga_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_vga_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
825,13 → 875,13
return ret;
}
 
struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
static const struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = {
.get_modes = radeon_tv_get_modes,
.mode_valid = radeon_tv_mode_valid,
.best_encoder = radeon_best_single_encoder,
};
 
struct drm_connector_funcs radeon_tv_connector_funcs = {
static const struct drm_connector_funcs radeon_tv_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_tv_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
848,6 → 898,27
return ret;
}
 
static bool radeon_check_hpd_status_unchanged(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
enum drm_connector_status status;
 
/* We only trust HPD on R600 and newer ASICS. */
if (rdev->family >= CHIP_R600
&& radeon_connector->hpd.hpd != RADEON_HPD_NONE) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
status = connector_status_connected;
else
status = connector_status_disconnected;
if (connector->status == status)
return true;
}
 
return false;
}
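
radeon_check_hpd_status_unchanged() lets the detect() callbacks skip expensive DDC and load probing whenever the hot-plug pin still agrees with the cached connector status, and only on ASICs where HPD is trustworthy. A compact sketch of that early-out, with the family check and pin sensing stubbed:

#include <stdbool.h>
#include <stdio.h>

enum status { DISCONNECTED, CONNECTED };

static bool hpd_sense(void) { return true; }	/* stub: pin reads high */

static bool status_unchanged(bool hpd_trusted, enum status cached)
{
	enum status now;

	if (!hpd_trusted)
		return false;	/* pre-R600: always do the full probe */
	now = hpd_sense() ? CONNECTED : DISCONNECTED;
	return now == cached;
}

int main(void)
{
	if (status_unchanged(true, CONNECTED))
		printf("skip full detect\n");
	return 0;
}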
 
/*
* DVI is complicated
* Do a DDC probe, if DDC probe passes, get the full EDID so
872,10 → 943,13
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false;
 
if (!force && radeon_check_hpd_status_unchanged(connector))
return connector->status;
 
if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector,
radeon_connector->requires_extended_probe);
dret = radeon_ddc_probe(radeon_connector);
if (dret) {
radeon_connector->detected_by_load = false;
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
938,7 → 1012,17
if ((ret == connector_status_connected) && (radeon_connector->use_digital == true))
goto out;
 
/* DVI-D and HDMI-A are digital only */
if ((connector->connector_type == DRM_MODE_CONNECTOR_DVID) ||
(connector->connector_type == DRM_MODE_CONNECTOR_HDMIA))
goto out;
 
/* if we aren't forcing don't do destructive polling */
if (!force) {
/* only return the previous status if we last
* detected a monitor via load.
*/
if (radeon_connector->detected_by_load)
ret = connector->status;
goto out;
}
957,6 → 1041,10
 
encoder = obj_to_encoder(obj);
 
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC &&
encoder->encoder_type != DRM_MODE_ENCODER_TVDAC)
continue;
 
encoder_funcs = encoder->helper_private;
if (encoder_funcs->detect) {
if (ret != connector_status_connected) {
964,6 → 1052,8
if (ret == connector_status_connected) {
radeon_connector->use_digital = false;
}
if (ret != connector_status_disconnected)
radeon_connector->detected_by_load = true;
}
break;
}
981,6 → 1071,7
* cases the DVI port is actually a virtual KVM port connected to the service
* processor.
*/
out:
if ((!rdev->is_atom_bios) &&
(ret == connector_status_disconnected) &&
rdev->mode_info.bios_hardcoded_edid_size) {
988,7 → 1079,6
ret = connector_status_connected;
}
 
out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
return ret;
995,7 → 1085,7
}
 
/* okay need to be smart in here about which encoder to pick */
struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
{
int enc_id = connector->encoder_ids[0];
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1066,7 → 1156,7
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
if (ASIC_IS_DCE3(rdev)) {
if (ASIC_IS_DCE6(rdev)) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
1085,13 → 1175,13
return MODE_OK;
}
 
struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
static const struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
.get_modes = radeon_dvi_get_modes,
.mode_valid = radeon_dvi_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
 
struct drm_connector_funcs radeon_dvi_connector_funcs = {
static const struct drm_connector_funcs radeon_dvi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dvi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
1110,7 → 1200,7
if (radeon_dig_connector->dp_i2c_bus)
radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
// drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
1126,6 → 1216,7
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_display_mode *mode;
 
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
1133,6 → 1224,15
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
/* need to setup ddc on the bridge */
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
ret = radeon_ddc_get_modes(radeon_connector);
}
 
if (ret > 0) {
if (encoder) {
1143,7 → 1243,6
return ret;
}
 
encoder = radeon_best_single_encoder(connector);
if (!encoder)
return 0;
 
1160,7 → 1259,8
}
} else {
/* need to setup ddc on the bridge */
if (radeon_connector_encoder_is_dp_bridge(connector)) {
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
if (encoder)
radeon_atom_ext_encoder_setup_ddc(encoder);
}
1170,13 → 1270,12
return ret;
}
 
bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector)
u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector)
{
struct drm_mode_object *obj;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
int i;
bool found = false;
 
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] == 0)
1192,14 → 1291,13
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
found = true;
break;
return radeon_encoder->encoder_id;
default:
break;
}
}
 
return found;
return ENCODER_OBJECT_ID_NONE;
}
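/*
 * The bool -> u16 change above lets callers both test for a DP bridge and
 * identify which one. The pattern used throughout this patch is:
 *
 *   if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
 *       ENCODER_OBJECT_ID_NONE) {
 *       // bridge present (TRAVIS or NUTMEG)
 *   }
 */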
 
bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector)
1251,6 → 1349,9
struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
 
if (!force && radeon_check_hpd_status_unchanged(connector))
return connector->status;
 
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
1276,12 → 1377,24
if (!radeon_dig_connector->edp_on)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
} else {
/* need to setup ddc on the bridge */
if (radeon_connector_encoder_is_dp_bridge(connector)) {
if (encoder)
} else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) {
/* DP bridges are always DP */
radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
/* get the DPCD from the bridge */
radeon_dp_getdpcd(radeon_connector);
 
if (encoder) {
/* setup ddc on the bridge */
radeon_atom_ext_encoder_setup_ddc(encoder);
if (radeon_ddc_probe(radeon_connector)) /* try DDC */
ret = connector_status_connected;
else if (radeon_connector->dac_load_detect) { /* try load detection */
struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
} else {
radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
ret = connector_status_connected;
1292,22 → 1405,11
if (radeon_dp_getdpcd(radeon_connector))
ret = connector_status_connected;
} else {
if (radeon_ddc_probe(radeon_connector,
radeon_connector->requires_extended_probe))
if (radeon_ddc_probe(radeon_connector))
ret = connector_status_connected;
}
}
 
if ((ret == connector_status_disconnected) &&
radeon_connector->dac_load_detect) {
struct drm_encoder *encoder = radeon_best_single_encoder(connector);
struct drm_encoder_helper_funcs *encoder_funcs;
if (encoder) {
encoder_funcs = encoder->helper_private;
ret = encoder_funcs->detect(encoder, connector);
}
}
}
 
radeon_connector_update_scratch_regs(connector, ret);
return ret;
1356,13 → 1458,13
}
}
 
struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
.get_modes = radeon_dp_get_modes,
.mode_valid = radeon_dp_mode_valid,
.best_encoder = radeon_dvi_encoder,
};
 
struct drm_connector_funcs radeon_dp_connector_funcs = {
static const struct drm_connector_funcs radeon_dp_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.detect = radeon_dp_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
1448,9 → 1550,7
radeon_connector->shared_ddc = shared_ddc;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
radeon_connector->requires_extended_probe =
radeon_connector_needs_extended_probe(rdev, supported_device,
connector_type);
 
radeon_connector->router = *router;
if (router->ddc_valid || router->cd_valid) {
radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
1746,7 → 1846,7
connector->polled = DRM_CONNECTOR_POLL_HPD;
 
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
// drm_sysfs_connector_add(connector);
return;
 
failed:
1797,9 → 1897,7
radeon_connector->devices = supported_device;
radeon_connector->connector_object_id = connector_object_id;
radeon_connector->hpd = *hpd;
radeon_connector->requires_extended_probe =
radeon_connector_needs_extended_probe(rdev, supported_device,
connector_type);
 
switch (connector_type) {
case DRM_MODE_CONNECTOR_VGA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
1905,16 → 2003,5
} else
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
drm_sysfs_connector_add(connector);
if (connector_type == DRM_MODE_CONNECTOR_LVDS) {
struct drm_encoder *drm_encoder;
 
list_for_each_entry(drm_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder;
 
radeon_encoder = to_radeon_encoder(drm_encoder);
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_LVDS)
radeon_legacy_backlight_init(radeon_encoder, connector);
// drm_sysfs_connector_add(connector);
}
}
}
/drivers/video/drm/radeon/radeon_device.c
26,7 → 26,7
* Jerome Glisse
*/
//#include <linux/console.h>
 
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
55,7 → 55,9
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;
int radeon_disp_priority = 0;
int radeon_lockup_timeout = 10000;
 
 
int irq_override = 0;
 
 
65,7 → 67,7
int init_display(struct radeon_device *rdev, videomode_t *mode);
int init_display_kms(struct radeon_device *rdev, videomode_t *mode);
 
int get_modes(videomode_t *mode, int *count);
int get_modes(videomode_t *mode, u32_t *count);
int set_user_mode(videomode_t *mode);
int r100_2D_test(struct radeon_device *rdev);
 
132,11 → 134,19
"TURKS",
"CAICOS",
"CAYMAN",
"ARUBA",
"TAHITI",
"PITCAIRN",
"VERDE",
"LAST",
};
 
/*
* Clear GPU surface registers.
/**
* radeon_surface_init - Clear GPU surface registers.
*
* @rdev: radeon_device pointer
*
* Clear GPU surface registers (r1xx-r5xx).
*/
void radeon_surface_init(struct radeon_device *rdev)
{
155,6 → 165,13
/*
* GPU scratch registers helpers function.
*/
/**
* radeon_scratch_init - Init scratch register driver information.
*
* @rdev: radeon_device pointer
*
* Init CP scratch register driver information (r1xx-r5xx)
*/
void radeon_scratch_init(struct radeon_device *rdev)
{
int i;
172,6 → 189,15
}
}
 
/**
* radeon_scratch_get - Allocate a scratch register
*
* @rdev: radeon_device pointer
* @reg: scratch register mmio offset
*
* Allocate a CP scratch register for use by the driver (all asics).
* Returns 0 on success or -EINVAL on failure.
*/
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
int i;
186,6 → 212,14
return -EINVAL;
}
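/*
 * Typical get/use/free pairing, as in the r1xx ring test (error handling
 * omitted; 0xCAFEDEAD is the marker value that test uses):
 *
 *   uint32_t scratch;
 *   if (radeon_scratch_get(rdev, &scratch) == 0) {
 *       WREG32(scratch, 0xCAFEDEAD);  // CPU seeds the register
 *       // ... GPU overwrites it via a CP packet ...
 *       radeon_scratch_free(rdev, scratch);
 *   }
 */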
 
/**
* radeon_scratch_free - Free a scratch register
*
* @rdev: radeon_device pointer
* @reg: scratch register mmio offset
*
* Free a CP scratch register allocated for use by the driver (all asics)
*/
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
int i;
198,6 → 232,20
}
}
 
/*
* radeon_wb_*()
* Writeback is the method by which the GPU updates special pages
* in memory with the status of certain GPU events (fences, ring pointers,
* etc.).
*/
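/*
 * A minimal illustrative sketch, not code from this driver, of why
 * writeback helps: the CPU polls a coherent memory word the GPU updates
 * instead of issuing a slow MMIO read per poll (names are illustrative):
 *
 *   static inline uint32_t example_wb_read(volatile uint32_t *wb_cpu_addr,
 *                                          unsigned offset)
 *   {
 *       return wb_cpu_addr[offset / 4];  // GPU DMA-writes into this page
 *   }
 */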
 
/**
* radeon_wb_disable - Disable Writeback
*
* @rdev: radeon_device pointer
*
* Disables Writeback (all asics). Used for suspend.
*/
void radeon_wb_disable(struct radeon_device *rdev)
{
int r;
213,6 → 261,14
rdev->wb.enabled = false;
}
 
/**
* radeon_wb_fini - Disable Writeback and free memory
*
* @rdev: radeon_device pointer
*
* Disables Writeback and frees the Writeback memory (all asics).
* Used at driver shutdown.
*/
void radeon_wb_fini(struct radeon_device *rdev)
{
radeon_wb_disable(rdev);
223,6 → 279,15
}
}
 
/**
* radeon_wb_init - Init Writeback driver info and allocate memory
*
* @rdev: radeon_device pointer
*
* Initializes Writeback and allocates the Writeback memory (all asics).
* Used at driver startup.
* Returns 0 on success or a negative error code on failure.
*/
int radeon_wb_init(struct radeon_device *rdev)
{
int r;
229,7 → 294,7
 
if (rdev->wb.wb_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
return r;
261,21 → 326,25
/* disable event_write fences */
rdev->wb.use_event = false;
/* disabled via module param */
if (radeon_no_wb == 1)
if (radeon_no_wb == 1) {
rdev->wb.enabled = false;
else {
} else {
if (rdev->flags & RADEON_IS_AGP) {
/* often unreliable on AGP */
// if (rdev->flags & RADEON_IS_AGP) {
// rdev->wb.enabled = false;
// } else {
rdev->wb.enabled = false;
} else if (rdev->family < CHIP_R300) {
/* often unreliable on pre-r300 */
rdev->wb.enabled = false;
} else {
rdev->wb.enabled = true;
/* event_write fences are only available on r600+ */
if (rdev->family >= CHIP_R600)
if (rdev->family >= CHIP_R600) {
rdev->wb.use_event = true;
// }
}
/* always use writeback/events on NI */
if (ASIC_IS_DCE5(rdev)) {
}
}
/* always use writeback/events on NI, APUs */
if (rdev->family >= CHIP_PALM) {
rdev->wb.enabled = true;
rdev->wb.use_event = true;
}
328,6 → 397,8
*/
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
uint64_t limit = (uint64_t)radeon_vram_limit << 20;
 
mc->vram_start = base;
if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
341,6 → 412,8
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
if (limit && limit < mc->real_vram_size)
mc->real_vram_size = limit;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
385,6 → 458,15
/*
* GPU helpers function.
*/
/**
* radeon_card_posted - check if the hw has already been initialized
*
* @rdev: radeon_device pointer
*
* Check if the asic has been initialized (all asics).
* Used at driver startup.
* Returns true if initialized or false if not.
*/
bool radeon_card_posted(struct radeon_device *rdev)
{
uint32_t reg;
431,6 → 513,14
 
}
 
/**
* radeon_update_bandwidth_info - update display bandwidth params
*
* @rdev: radeon_device pointer
*
* Used when sclk/mclk are switched or display modes are set.
* params are used to calculate display watermarks (all asics)
*/
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
fixed20_12 a;
451,6 → 541,15
}
}
 
/**
* radeon_boot_test_post_card - check and possibly initialize the hw
*
* @rdev: radeon_device pointer
*
* Check if the asic is initialized and if not, attempt to initialize
* it (all asics).
* Returns true if initialized or false if not.
*/
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
469,14 → 568,24
}
}
 
/**
* radeon_dummy_page_init - init dummy page used by the driver
*
* @rdev: radeon_device pointer
*
* Allocate the dummy page used by the driver (all asics).
* This dummy page is used by the driver as a filler for GART entries
* when pages are taken out of the GART.
* Returns 0 on success, -ENOMEM on failure.
*/
int radeon_dummy_page_init(struct radeon_device *rdev)
{
if (rdev->dummy_page.page)
return 0;
rdev->dummy_page.page = AllocPage();
rdev->dummy_page.page = (void*)AllocPage();
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
rdev->dummy_page.addr = MapIoMem(rdev->dummy_page.page, 4096, 5);
rdev->dummy_page.addr = MapIoMem((addr_t)rdev->dummy_page.page, 4096, 3);
if (!rdev->dummy_page.addr) {
// __free_page(rdev->dummy_page.page);
rdev->dummy_page.page = NULL;
485,16 → 594,40
return 0;
}
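/*
 * Why the dummy page matters: an unbound GART entry must still point at
 * some valid DMA address, or a stray GPU access can fault the bus. A
 * sketch of the unbind step (radeon_gart.c does this via the asic hook):
 *
 *   radeon_gart_set_page(rdev, i, rdev->dummy_page.addr);
 */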
 
/**
* radeon_dummy_page_fini - free dummy page used by the driver
*
* @rdev: radeon_device pointer
*
* Frees the dummy page used by the driver (all asics).
*/
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
if (rdev->dummy_page.page == NULL)
return;
KernelFree(rdev->dummy_page.addr);
KernelFree((void*)rdev->dummy_page.addr);
rdev->dummy_page.page = NULL;
}
 
 
/* ATOM accessor methods */
/*
* ATOM is an interpreted byte code stored in tables in the vbios. The
* driver registers callbacks to access registers and the interpreter
* in the driver parses the tables and executes them to program specific
* actions (set display modes, asic init, etc.). See radeon_atombios.c,
* atombios.h, and atom.c
*/
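/*
 * A minimal sketch, following the struct card_info layout in atom.h, of
 * how radeon_atombios_init() wires the accessors below together so the
 * interpreter never touches hardware directly:
 *
 *   atom_card_info->reg_read  = cail_reg_read;
 *   atom_card_info->reg_write = cail_reg_write;
 *   atom_card_info->mc_read   = cail_mc_read;
 *   atom_card_info->mc_write  = cail_mc_write;
 *   atom_card_info->pll_read  = cail_pll_read;
 *   atom_card_info->pll_write = cail_pll_write;
 */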
 
/**
* cail_pll_read - read PLL register
*
* @info: atom card_info pointer
* @reg: PLL register offset
*
* Provides a PLL register accessor for the atom interpreter (r4xx+).
* Returns the value of the PLL register.
*/
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
504,6 → 637,15
return r;
}
 
/**
* cail_pll_write - write PLL register
*
* @info: atom card_info pointer
* @reg: PLL register offset
* @val: value to write to the pll register
*
* Provides a PLL register accessor for the atom interpreter (r4xx+).
*/
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
511,6 → 653,15
rdev->pll_wreg(rdev, reg, val);
}
 
/**
* cail_mc_read - read MC (Memory Controller) register
*
* @info: atom card_info pointer
* @reg: MC register offset
*
* Provides an MC register accessor for the atom interpreter (r4xx+).
* Returns the value of the MC register.
*/
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
520,6 → 671,15
return r;
}
 
/**
* cail_mc_write - write MC (Memory Controller) register
*
* @info: atom card_info pointer
* @reg: MC register offset
* @val: value to write to the MC register
*
* Provides an MC register accessor for the atom interpreter (r4xx+).
*/
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
527,6 → 687,15
rdev->mc_wreg(rdev, reg, val);
}
 
/**
* cail_reg_write - write MMIO register
*
* @info: atom card_info pointer
* @reg: MMIO register offset
* @val: value to write to the MMIO register
*
* Provides an MMIO register accessor for the atom interpreter (r4xx+).
*/
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
534,6 → 703,15
WREG32(reg*4, val);
}
 
/**
* cail_reg_read - read MMIO register
*
* @info: atom card_info pointer
* @reg: MMIO register offset
*
* Provides an MMIO register accessor for the atom interpreter (r4xx+).
* Returns the value of the MMIO register.
*/
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
543,6 → 721,15
return r;
}
 
/**
* cail_ioreg_write - write IO register
*
* @info: atom card_info pointer
* @reg: IO register offset
* @val: value to write to the IO register
*
* Provides an IO register accessor for the atom interpreter (r4xx+).
*/
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
struct radeon_device *rdev = info->dev->dev_private;
550,6 → 737,15
WREG32_IO(reg*4, val);
}
 
/**
* cail_ioreg_read - read IO register
*
* @info: atom card_info pointer
* @reg: IO register offset
*
* Provides an IO register accessor for the atom interpreter (r4xx+).
* Returns the value of the IO register.
*/
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
struct radeon_device *rdev = info->dev->dev_private;
559,6 → 755,16
return r;
}
 
/**
* radeon_atombios_init - init the driver info and callbacks for atombios
*
* @rdev: radeon_device pointer
*
* Initializes the driver info and register access callbacks for the
* ATOM interpreter (r4xx+).
* Returns 0 on success, -ENOMEM on failure.
* Called at driver startup.
*/
int radeon_atombios_init(struct radeon_device *rdev)
{
struct card_info *atom_card_info =
592,6 → 798,15
return 0;
}
 
/**
* radeon_atombios_fini - free the driver info and callbacks for atombios
*
* @rdev: radeon_device pointer
*
* Frees the driver info and register access callbacks for the ATOM
* interpreter (r4xx+).
* Called at driver shutdown.
*/
void radeon_atombios_fini(struct radeon_device *rdev)
{
if (rdev->mode_info.atom_context) {
601,6 → 816,22
kfree(rdev->mode_info.atom_card_info);
}
 
/* COMBIOS */
/*
* COMBIOS is the bios format prior to ATOM. It provides
* command tables similar to ATOM, but doesn't have a unified
* parser. See radeon_combios.c
*/
 
/**
* radeon_combios_init - init the driver info for combios
*
* @rdev: radeon_device pointer
*
* Initializes the driver info for combios (r1xx-r3xx).
* Returns 0 on success.
* Called at driver startup.
*/
int radeon_combios_init(struct radeon_device *rdev)
{
radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
607,11 → 838,28
return 0;
}
 
/**
* radeon_combios_fini - free the driver info for combios
*
* @rdev: radeon_device pointer
*
* Frees the driver info for combios (r1xx-r3xx).
* Called at driver shutdown.
*/
void radeon_combios_fini(struct radeon_device *rdev)
{
}
 
/* if we get transitioned to only one device, tak VGA back */
/* if we get transitioned to only one device, take VGA back */
/**
* radeon_vga_set_decode - enable/disable vga decode
*
* @cookie: radeon_device pointer
* @state: enable/disable vga decode
*
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
struct radeon_device *rdev = cookie;
623,55 → 871,49
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
 
void radeon_check_arguments(struct radeon_device *rdev)
/**
* radeon_check_pot_argument - check that argument is a power of two
*
* @arg: value to check
*
* Validates that a certain argument is a power of two (all asics).
* Returns true if argument is valid.
*/
static bool radeon_check_pot_argument(int arg)
{
return (arg & (arg - 1)) == 0;
}
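/*
 * Worked example of the bit trick above: a power of two has exactly one
 * set bit, so subtracting 1 sets all lower bits and clears that one:
 *   arg = 64 -> 0x40 & 0x3F == 0    -> true
 *   arg = 96 -> 0x60 & 0x5F == 0x40 -> false
 * arg = 0 also passes, which is fine here since 0 means "no limit".
 */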
 
/**
* radeon_check_arguments - validate module params
*
* @rdev: radeon_device pointer
*
* Validates certain module parameters and updates
* the associated values used by the driver (all asics).
*/
static void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
switch (radeon_vram_limit) {
case 0:
case 4:
case 8:
case 16:
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
case 2048:
case 4096:
break;
default:
if (!radeon_check_pot_argument(radeon_vram_limit)) {
dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
radeon_vram_limit);
radeon_vram_limit = 0;
break;
}
radeon_vram_limit = radeon_vram_limit << 20;
 
/* gtt size must be power of two and greater or equal to 32M */
switch (radeon_gart_size) {
case 4:
case 8:
case 16:
if (radeon_gart_size < 32) {
dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
radeon_gart_size);
radeon_gart_size = 512;
break;
case 32:
case 64:
case 128:
case 256:
case 512:
case 1024:
case 2048:
case 4096:
break;
default:
 
} else if (!radeon_check_pot_argument(radeon_gart_size)) {
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
radeon_gart_size);
radeon_gart_size = 512;
break;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
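/* radeon_gart_size is in MiB; e.g. 512 becomes 512 << 20 == 0x20000000
 * bytes. The uint64_t cast keeps a 4096 MiB setting from overflowing a
 * 32-bit shift (4096 << 20 == 1 << 32). */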
 
/* AGP mode can only be -1, 1, 2, 4, 8 */
switch (radeon_agpmode) {
case -1:
705,25 → 947,38
rdev->is_atom_bios = false;
rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
rdev->gpu_lockup = false;
rdev->accel_working = false;
/* set up ring ids */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
rdev->ring[i].idx = i;
}
 
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device);
DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
radeon_family_name[rdev->family], pdev->vendor, pdev->device,
pdev->subsystem_vendor, pdev->subsystem_device);
 
/* mutex initialization are all done here so we
* can recall function without having locking issues */
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
mutex_init(&rdev->ring_lock);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
atomic_set(&rdev->ih.lock, 0);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
mutex_init(&rdev->gpu_clock_mutex);
init_rwsem(&rdev->pm.mclk_lock);
init_rwsem(&rdev->exclusive_lock);
init_waitqueue_head(&rdev->irq.vblank_queue);
r = radeon_gem_init(rdev);
if (r)
return r;
/* initialize vm here */
mutex_init(&rdev->vm_manager.lock);
/* Adjust VM size here.
* Currently set to 4GB ((1 << 20) 4k pages).
* Max GPUVM size for cayman and SI is 40 bits.
*/
rdev->vm_manager.max_pfn = 1 << 20;
INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 
/* Set asic functions */
r = radeon_asic_init(rdev);
745,14 → 1000,15
 
/* set DMA mask + need_dma32 flags.
* PCIE - can handle 40-bits.
* IGP - can handle 40-bits (in theory)
* IGP - can handle 40-bits
* AGP - generally dma32 is safest
* PCI - only dma32
* PCI - dma32 for legacy pci gart, 40 bits on newer asics
*/
rdev->need_dma32 = false;
if (rdev->flags & RADEON_IS_AGP)
rdev->need_dma32 = true;
if (rdev->flags & RADEON_IS_PCI)
if ((rdev->flags & RADEON_IS_PCI) &&
(rdev->family <= CHIP_RS740))
rdev->need_dma32 = true;
 
dma_bits = rdev->need_dma32 ? 32 : 40;
759,6 → 1015,7
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
dma_bits = 32;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
 
766,10 → 1023,7
/* TODO: block userspace mapping of io register */
rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
 
rdev->rmmio = (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
PG_SW+PG_NOCACHE);
 
rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
if (rdev->rmmio == NULL) {
return -ENOMEM;
}
776,6 → 1030,18
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
 
/* io port mapping */
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
break;
}
}
if (rdev->rio_mem == NULL)
DRM_ERROR("Unable to find PCI I/O BAR\n");
 
 
r = radeon_init(rdev);
if (r)
return r;
794,13 → 1060,94
// if (radeon_testing) {
// radeon_test_moves(rdev);
// }
// if ((radeon_testing & 2)) {
// radeon_test_syncing(rdev);
// }
if (radeon_benchmarking) {
radeon_benchmark(rdev);
radeon_benchmark(rdev, radeon_benchmarking);
}
return 0;
}
 
/**
* radeon_gpu_reset - reset the asic
*
* @rdev: radeon_device pointer
*
* Attempt to reset the GPU if it has hung (all asics).
* Returns 0 for success or an error on failure.
*/
int radeon_gpu_reset(struct radeon_device *rdev)
{
unsigned ring_sizes[RADEON_NUM_RINGS];
uint32_t *ring_data[RADEON_NUM_RINGS];
 
bool saved = false;
 
int i, r;
int resched;
 
// down_write(&rdev->exclusive_lock);
radeon_save_bios_scratch_regs(rdev);
/* block TTM */
// resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
radeon_suspend(rdev);
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
&ring_data[i]);
if (ring_sizes[i]) {
saved = true;
dev_info(rdev->dev, "Saved %d dwords of commands "
"on ring %d.\n", ring_sizes[i], i);
}
}
 
retry:
r = radeon_asic_reset(rdev);
if (!r) {
dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
radeon_resume(rdev);
}
 
radeon_restore_bios_scratch_regs(rdev);
drm_helper_resume_force_mode(rdev->ddev);
 
if (!r) {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
radeon_ring_restore(rdev, &rdev->ring[i],
ring_sizes[i], ring_data[i]);
ring_sizes[i] = 0;
ring_data[i] = NULL;
}
 
r = radeon_ib_ring_tests(rdev);
if (r) {
dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
if (saved) {
saved = false;
radeon_suspend(rdev);
goto retry;
}
}
} else {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
kfree(ring_data[i]);
}
}
 
// ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
if (r) {
/* bad news, how to tell it to userspace? */
dev_info(rdev->dev, "GPU reset failed\n");
}
 
// up_write(&rdev->exclusive_lock);
return r;
}
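/*
 * Reset flow in brief: save the BIOS scratch regs and any unprocessed
 * ring contents, suspend, reset the asic, resume, restore the rings,
 * then run the IB tests. If the IB tests fail with saved commands, the
 * code retries once without restoring them, in case the saved stream
 * itself is what hung the GPU.
 */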
 
 
 
/*
* Driver load/unload
*/
901,15 → 1248,6
init_display(dev->dev_private, &usermode);
 
 
uint32_t route0 = PciRead32(0, 31<<3, 0x60);
 
uint32_t route1 = PciRead32(0, 31<<3, 0x68);
 
uint8_t elcr0 = in8(0x4D0);
uint8_t elcr1 = in8(0x4D1);
 
dbgprintf("pci route: %x %x elcr: %x %x\n", route0, route1, elcr0, elcr1);
 
LEAVE();
 
return 0;
1022,7 → 1360,6
dbgprintf("SRV_ENUM_MODES inp %x inp_size %x out_size %x\n",
inp, io->inp_size, io->out_size );
check_output(4);
check_input(*outp * sizeof(videomode_t));
if( radeon_modeset)
retval = get_modes((videomode_t*)inp, outp);
break;
1036,12 → 1373,12
break;
 
case SRV_CREATE_VIDEO:
retval = r600_create_video(inp[0], inp[1], outp);
// retval = r600_create_video(inp[0], inp[1], outp);
break;
 
case SRV_BLIT_VIDEO:
r600_video_blit( ((uint64_t*)inp)[0], inp[2], inp[3],
inp[4], inp[5], inp[6]);
// r600_video_blit( ((uint64_t*)inp)[0], inp[2], inp[3],
// inp[4], inp[5], inp[6]);
 
retval = 0;
break;
1049,7 → 1386,7
case SRV_CREATE_BITMAP:
check_input(8);
check_output(4);
retval = create_bitmap(outp, inp[0], inp[1]);
// retval = create_bitmap(outp, inp[0], inp[1]);
break;
 
};
1064,7 → 1401,7
{
struct radeon_device *rdev = NULL;
 
struct pci_device_id *ent;
const struct pci_device_id *ent;
 
int err;
u32_t retval = 0;
1088,7 → 1425,7
return 0;
};
}
dbgprintf("Radeon RC11 cmdline %s\n", cmdline);
dbgprintf("Radeon RC12 preview 1 cmdline %s\n", cmdline);
 
enum_pci_devices();
 
1121,3 → 1458,26
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{};
 
 
#define PCI_CLASS_REVISION 0x08
#define PCI_CLASS_DISPLAY_VGA 0x0300
 
int pci_scan_filter(u32_t id, u32_t busnr, u32_t devfn)
{
u16_t vendor, device;
u32_t class;
int ret = 0;
 
vendor = id & 0xffff;
device = (id >> 16) & 0xffff;
 
if(vendor == 0x1002)
{
class = PciRead32(busnr, devfn, PCI_CLASS_REVISION);
class >>= 16;
 
if( class == PCI_CLASS_DISPLAY_VGA)
ret = 1;
}
return ret;
}
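/*
 * Example: a Caicos-based HD 6450 enumerates with id == 0x67791002, so
 * vendor == 0x1002; the dword at config offset 0x08 reads 0x030000xx,
 * class >> 16 yields 0x0300 == PCI_CLASS_DISPLAY_VGA, and the filter
 * returns 1 so the driver claims the device.
 */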
/drivers/video/drm/radeon/radeon_display.c
23,18 → 23,16
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
 
#include "atom.h"
#include <asm/div64.h>
 
#include "drm_crtc_helper.h"
#include "drm_edid.h"
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
 
static int radeon_ddc_dump(struct drm_connector *connector);
 
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
288,7 → 286,7
radeon_legacy_init_crtc(dev, radeon_crtc);
}
 
static const char *encoder_names[36] = {
static const char *encoder_names[37] = {
"NONE",
"INTERNAL_LVDS",
"INTERNAL_TMDS1",
325,26 → 323,9
"INTERNAL_UNIPHY2",
"NUTMEG",
"TRAVIS",
"INTERNAL_VCE"
};
 
static const char *connector_names[15] = {
"Unknown",
"VGA",
"DVI-I",
"DVI-D",
"DVI-A",
"Composite",
"S-video",
"LVDS",
"Component",
"DIN",
"DisplayPort",
"HDMI-A",
"HDMI-B",
"TV",
"eDP",
};
 
static const char *hpd_names[6] = {
"HPD1",
"HPD2",
367,7 → 348,7
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
radeon_connector = to_radeon_connector(connector);
DRM_INFO("Connector %d:\n", i);
DRM_INFO(" %s\n", connector_names[connector->connector_type]);
DRM_INFO(" %s\n", drm_get_connector_name(connector));
if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
DRM_INFO(" %s\n", hpd_names[radeon_connector->hpd.hpd]);
if (radeon_connector->ddc_bus) {
433,7 → 414,6
static bool radeon_setup_enc_conn(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *drm_connector;
bool ret = false;
 
if (rdev->bios) {
453,8 → 433,6
if (ret) {
radeon_setup_encoder_clones(dev);
radeon_print_display_setup(dev);
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
radeon_ddc_dump(drm_connector);
}
 
return ret;
471,17 → 449,23
radeon_router_select_ddc_port(radeon_connector);
 
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
(radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) ||
(radeon_connector_encoder_get_dp_bridge_encoder_id(&radeon_connector->base) !=
ENCODER_OBJECT_ID_NONE)) {
struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) && dig->dp_i2c_bus)
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&dig->dp_i2c_bus->adapter);
else if (radeon_connector->ddc_bus && !radeon_connector->edid)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
} else {
if (radeon_connector->ddc_bus && !radeon_connector->edid)
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
}
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
}
 
if (!radeon_connector->edid) {
if (rdev->is_atom_bios) {
502,34 → 486,6
return 0;
}
 
static int radeon_ddc_dump(struct drm_connector *connector)
{
struct edid *edid;
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
int ret = 0;
 
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
 
if (!radeon_connector->ddc_bus)
return -1;
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
/* Log EDID retrieval status here. In particular with regard to
* connectors with requires_extended_probe flag set, that will prevent
* function radeon_dvi_detect() to fetch EDID on this connector,
* as long as there is no valid EDID header found */
if (edid) {
DRM_INFO("Radeon display connector %s: Found valid EDID",
drm_get_connector_name(connector));
kfree(edid);
} else {
DRM_INFO("Radeon display connector %s: No monitor connected or invalid EDID",
drm_get_connector_name(connector));
}
return ret;
}
 
/* avivo */
static void avivo_get_fb_div(struct radeon_pll *pll,
u32 target_clock,
867,15 → 823,25
.create_handle = radeon_user_framebuffer_create_handle,
};
 
void
int
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
 
ENTER();
 
rfb->obj = obj;
drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
if (ret) {
rfb->obj = NULL;
return ret;
}
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
LEAVE();
return 0;
}
 
 
885,11 → 851,6
// .output_poll_changed = radeon_output_poll_changed
};
 
struct drm_prop_enum_list {
int type;
char *name;
};
 
static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] =
{ { 0, "driver" },
{ 1, "bios" },
914,86 → 875,53
 
static int radeon_modeset_create_props(struct radeon_device *rdev)
{
int i, sz;
int sz;
 
if (rdev->is_atom_bios) {
rdev->mode_info.coherent_mode_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"coherent", 2);
drm_property_create_range(rdev->ddev, 0, "coherent", 0, 1);
if (!rdev->mode_info.coherent_mode_property)
return -ENOMEM;
 
rdev->mode_info.coherent_mode_property->values[0] = 0;
rdev->mode_info.coherent_mode_property->values[1] = 1;
}
 
if (!ASIC_IS_AVIVO(rdev)) {
sz = ARRAY_SIZE(radeon_tmds_pll_enum_list);
rdev->mode_info.tmds_pll_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_ENUM,
"tmds_pll", sz);
for (i = 0; i < sz; i++) {
drm_property_add_enum(rdev->mode_info.tmds_pll_property,
i,
radeon_tmds_pll_enum_list[i].type,
radeon_tmds_pll_enum_list[i].name);
drm_property_create_enum(rdev->ddev, 0,
"tmds_pll",
radeon_tmds_pll_enum_list, sz);
}
}
 
rdev->mode_info.load_detect_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"load detection", 2);
drm_property_create_range(rdev->ddev, 0, "load detection", 0, 1);
if (!rdev->mode_info.load_detect_property)
return -ENOMEM;
rdev->mode_info.load_detect_property->values[0] = 0;
rdev->mode_info.load_detect_property->values[1] = 1;
 
drm_mode_create_scaling_mode_property(rdev->ddev);
 
sz = ARRAY_SIZE(radeon_tv_std_enum_list);
rdev->mode_info.tv_std_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_ENUM,
"tv standard", sz);
for (i = 0; i < sz; i++) {
drm_property_add_enum(rdev->mode_info.tv_std_property,
i,
radeon_tv_std_enum_list[i].type,
radeon_tv_std_enum_list[i].name);
}
drm_property_create_enum(rdev->ddev, 0,
"tv standard",
radeon_tv_std_enum_list, sz);
 
sz = ARRAY_SIZE(radeon_underscan_enum_list);
rdev->mode_info.underscan_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_ENUM,
"underscan", sz);
for (i = 0; i < sz; i++) {
drm_property_add_enum(rdev->mode_info.underscan_property,
i,
radeon_underscan_enum_list[i].type,
radeon_underscan_enum_list[i].name);
}
drm_property_create_enum(rdev->ddev, 0,
"underscan",
radeon_underscan_enum_list, sz);
 
rdev->mode_info.underscan_hborder_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"underscan hborder", 2);
drm_property_create_range(rdev->ddev, 0,
"underscan hborder", 0, 128);
if (!rdev->mode_info.underscan_hborder_property)
return -ENOMEM;
rdev->mode_info.underscan_hborder_property->values[0] = 0;
rdev->mode_info.underscan_hborder_property->values[1] = 128;
 
rdev->mode_info.underscan_vborder_property =
drm_property_create(rdev->ddev,
DRM_MODE_PROP_RANGE,
"underscan vborder", 2);
drm_property_create_range(rdev->ddev, 0,
"underscan vborder", 0, 128);
if (!rdev->mode_info.underscan_vborder_property)
return -ENOMEM;
rdev->mode_info.underscan_vborder_property->values[0] = 0;
rdev->mode_info.underscan_vborder_property->values[1] = 128;
 
return 0;
}
1018,6 → 946,93
 
}
 
/*
* Allocate hdmi structs and determine register offsets
*/
static void radeon_afmt_init(struct radeon_device *rdev)
{
int i;
 
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++)
rdev->mode_info.afmt[i] = NULL;
 
if (ASIC_IS_DCE6(rdev)) {
/* todo */
} else if (ASIC_IS_DCE4(rdev)) {
/* DCE4/5 has 6 audio blocks tied to DIG encoders */
/* DCE4.1 has 2 audio blocks tied to DIG encoders */
rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
rdev->mode_info.afmt[0]->id = 0;
}
rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
rdev->mode_info.afmt[1]->id = 1;
}
if (!ASIC_IS_DCE41(rdev)) {
rdev->mode_info.afmt[2] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[2]) {
rdev->mode_info.afmt[2]->offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
rdev->mode_info.afmt[2]->id = 2;
}
rdev->mode_info.afmt[3] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[3]) {
rdev->mode_info.afmt[3]->offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
rdev->mode_info.afmt[3]->id = 3;
}
rdev->mode_info.afmt[4] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[4]) {
rdev->mode_info.afmt[4]->offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
rdev->mode_info.afmt[4]->id = 4;
}
rdev->mode_info.afmt[5] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[5]) {
rdev->mode_info.afmt[5]->offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
rdev->mode_info.afmt[5]->id = 5;
}
}
} else if (ASIC_IS_DCE3(rdev)) {
/* DCE3.x has 2 audio blocks tied to DIG encoders */
rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = DCE3_HDMI_OFFSET0;
rdev->mode_info.afmt[0]->id = 0;
}
rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = DCE3_HDMI_OFFSET1;
rdev->mode_info.afmt[1]->id = 1;
}
} else if (ASIC_IS_DCE2(rdev)) {
/* DCE2 has at least 1 routable audio block */
rdev->mode_info.afmt[0] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[0]) {
rdev->mode_info.afmt[0]->offset = DCE2_HDMI_OFFSET0;
rdev->mode_info.afmt[0]->id = 0;
}
/* r6xx has 2 routable audio blocks */
if (rdev->family >= CHIP_R600) {
rdev->mode_info.afmt[1] = kzalloc(sizeof(struct radeon_afmt), GFP_KERNEL);
if (rdev->mode_info.afmt[1]) {
rdev->mode_info.afmt[1]->offset = DCE2_HDMI_OFFSET1;
rdev->mode_info.afmt[1]->id = 1;
}
}
}
}
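/*
 * Net effect of the allocation above: DCE2 gets 1 afmt block (2 from
 * r6xx on), DCE3.x gets 2, DCE4.1 gets 2, other DCE4/5 parts get 6,
 * and DCE6 is still a todo.
 */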
 
static void radeon_afmt_fini(struct radeon_device *rdev)
{
int i;
 
for (i = 0; i < RADEON_MAX_AFMT_BLOCKS; i++) {
kfree(rdev->mode_info.afmt[i]);
rdev->mode_info.afmt[i] = NULL;
}
}
 
int radeon_modeset_init(struct radeon_device *rdev)
{
int i;
1026,7 → 1041,7
drm_mode_config_init(rdev->ddev);
rdev->mode_info.mode_config_initialized = true;
 
rdev->ddev->mode_config.funcs = (void *)&radeon_mode_funcs;
rdev->ddev->mode_config.funcs = &radeon_mode_funcs;
 
if (ASIC_IS_DCE5(rdev)) {
rdev->ddev->mode_config.max_width = 16384;
1039,6 → 1054,9
rdev->ddev->mode_config.max_height = 4096;
}
 
rdev->ddev->mode_config.preferred_depth = 24;
rdev->ddev->mode_config.prefer_shadow = 1;
 
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
 
ret = radeon_modeset_create_props(rdev);
1066,13 → 1084,18
return ret;
}
 
/* init dig PHYs */
if (rdev->is_atom_bios)
/* init dig PHYs, disp eng pll */
if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
radeon_atom_disp_eng_pll_init(rdev);
}
 
/* initialize hpd */
// radeon_hpd_init(rdev);
 
/* setup afmt */
// radeon_afmt_init(rdev);
 
/* Initialize power management */
// radeon_pm_init(rdev);
 
1087,6 → 1110,7
kfree(rdev->mode_info.bios_hardcoded_edid);
 
if (rdev->mode_info.mode_config_initialized) {
// radeon_afmt_fini(rdev);
// drm_kms_helper_poll_fini(rdev->ddev);
// radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
1096,7 → 1120,7
radeon_i2c_fini(rdev);
}
 
static bool is_hdtv_mode(struct drm_display_mode *mode)
static bool is_hdtv_mode(const struct drm_display_mode *mode)
{
/* try and guess if this is a tv or a monitor */
if ((mode->vdisplay == 480 && mode->hdisplay == 720) || /* 480p */
1109,7 → 1133,7
}
 
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
1126,6 → 1150,8
radeon_crtc->h_border = 0;
radeon_crtc->v_border = 0;
 
ENTER();
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc != crtc)
continue;
1133,6 → 1159,10
connector = radeon_get_connector_for_encoder(encoder);
radeon_connector = to_radeon_connector(connector);
 
dbgprintf("native_hdisplay %d vdisplay %d\n",
radeon_encoder->native_mode.hdisplay,
radeon_encoder->native_mode.vdisplay);
 
if (first) {
/* set scaling */
if (radeon_encoder->rmx_type == RMX_OFF)
1198,6 → 1228,9
radeon_crtc->vsc.full = dfixed_const(1);
radeon_crtc->hsc.full = dfixed_const(1);
}
 
LEAVE();
 
return true;
}
 
/drivers/video/drm/radeon/radeon_encoders.c
23,17 → 23,19
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
 
extern int atom_debug;
extern void
radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
extern void
radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
struct drm_connector *drm_connector);
 
/* evil but including atombios.h is much worse */
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
struct drm_display_mode *mode);
 
static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
{
156,30 → 158,10
return ret;
}
 
static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
return true;
default:
return false;
}
}
 
void
radeon_link_encoder_connector(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct drm_encoder *encoder;
190,11 → 172,19
radeon_connector = to_radeon_connector(connector);
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->devices & radeon_connector->devices)
if (radeon_encoder->devices & radeon_connector->devices) {
drm_mode_connector_attach_encoder(connector, encoder);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (rdev->is_atom_bios)
radeon_atom_backlight_init(radeon_encoder, connector);
else
radeon_legacy_backlight_init(radeon_encoder, connector);
rdev->mode_info.bl_encoder = radeon_encoder;
}
}
}
}
}
 
void radeon_encoder_set_active_device(struct drm_encoder *encoder)
{
229,7 → 219,7
return NULL;
}
 
static struct drm_connector *
struct drm_connector *
radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
245,7 → 235,7
return NULL;
}
 
struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder)
struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
266,9 → 256,9
return NULL;
}
 
bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder)
u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
{
struct drm_encoder *other_encoder = radeon_atom_get_external_encoder(encoder);
struct drm_encoder *other_encoder = radeon_get_external_encoder(encoder);
 
if (other_encoder) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(other_encoder);
276,13 → 266,12
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
return true;
return radeon_encoder->encoder_id;
default:
return false;
return ENCODER_OBJECT_ID_NONE;
}
}
 
return false;
return ENCODER_OBJECT_ID_NONE;
}
 
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
332,339 → 321,15
 
}
 
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
u32 pixel_clock)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
/* hw bug */
if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
&& (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
/* get the native mode for LVDS */
if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT))
radeon_panel_mode_fixup(encoder, adjusted_mode);
 
/* get the native mode for TV */
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
if (tv_dac) {
if (tv_dac->tv_std == TV_STD_NTSC ||
tv_dac->tv_std == TV_STD_NTSC_J ||
tv_dac->tv_std == TV_STD_PAL_M)
radeon_atom_get_tv_timings(rdev, 0, adjusted_mode);
else
radeon_atom_get_tv_timings(rdev, 1, adjusted_mode);
}
}
 
if (ASIC_IS_DCE3(rdev) &&
((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
radeon_encoder_is_dp_bridge(encoder))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
 
return true;
}
 
static void
atombios_dac_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
DAC_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
index = GetIndexIntoMasterTable(COMMAND, DAC2EncoderControl);
break;
}
 
args.ucAction = action;
 
if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT))
args.ucDacStandard = ATOM_DAC1_PS2;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.ucDacStandard = ATOM_DAC1_CV;
else {
switch (dac_info->tv_std) {
case TV_STD_PAL:
case TV_STD_PAL_M:
case TV_STD_SCART_PAL:
case TV_STD_SECAM:
case TV_STD_PAL_CN:
args.ucDacStandard = ATOM_DAC1_PAL;
break;
case TV_STD_NTSC:
case TV_STD_NTSC_J:
case TV_STD_PAL_60:
default:
args.ucDacStandard = ATOM_DAC1_NTSC;
break;
}
}
args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
static void
atombios_tv_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
TV_ENCODER_CONTROL_PS_ALLOCATION args;
int index = 0;
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv;
 
memset(&args, 0, sizeof(args));
 
index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl);
 
args.sTVEncoder.ucAction = action;
 
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.sTVEncoder.ucTvStandard = ATOM_TV_CV;
else {
switch (dac_info->tv_std) {
case TV_STD_NTSC:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
break;
case TV_STD_PAL:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL;
break;
case TV_STD_PAL_M:
args.sTVEncoder.ucTvStandard = ATOM_TV_PALM;
break;
case TV_STD_PAL_60:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL60;
break;
case TV_STD_NTSC_J:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSCJ;
break;
case TV_STD_SCART_PAL:
args.sTVEncoder.ucTvStandard = ATOM_TV_PAL; /* ??? */
break;
case TV_STD_SECAM:
args.sTVEncoder.ucTvStandard = ATOM_TV_SECAM;
break;
case TV_STD_PAL_CN:
args.sTVEncoder.ucTvStandard = ATOM_TV_PALCN;
break;
default:
args.sTVEncoder.ucTvStandard = ATOM_TV_NTSC;
break;
}
}
 
args.sTVEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
union dvo_encoder_control {
ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION ext_tmds;
DVO_ENCODER_CONTROL_PS_ALLOCATION dvo;
DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 dvo_v3;
};
 
void
atombios_dvo_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
union dvo_encoder_control args;
int index = GetIndexIntoMasterTable(COMMAND, DVOEncoderControl);
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE3(rdev)) {
/* DCE3+ */
args.dvo_v3.ucAction = action;
args.dvo_v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.dvo_v3.ucDVOConfig = 0; /* XXX */
} else if (ASIC_IS_DCE2(rdev)) {
/* DCE2 (pre-DCE3 R6xx, RS600/690/740) */
args.dvo.sDVOEncoder.ucAction = action;
args.dvo.sDVOEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
/* DFP1, CRT1, TV1 depending on the type of port */
args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
 
if (radeon_encoder->pixel_clock > 165000)
args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
} else {
/* R4xx, R5xx */
args.ext_tmds.sXTmdsEncoder.ucEnable = action;
 
if (radeon_encoder->pixel_clock > 165000)
args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
 
/*if (pScrn->rgbBits == 8)*/
args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
union lvds_encoder_control {
LVDS_ENCODER_CONTROL_PS_ALLOCATION v1;
LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
};
 
void
atombios_digital_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
 
if (!dig)
return;
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
hdmi_detected = 1;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
index = GetIndexIntoMasterTable(COMMAND, TMDS1EncoderControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, LVDSEncoderControl);
else
index = GetIndexIntoMasterTable(COMMAND, TMDS2EncoderControl);
break;
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
case 2:
switch (crev) {
case 1:
args.v1.ucMisc = 0;
args.v1.ucAction = action;
if (hdmi_detected)
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
} else {
if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
/*if (pScrn->rgbBits == 8) */
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
}
break;
case 2:
case 3:
args.v2.ucMisc = 0;
args.v2.ucAction = action;
if (crev == 3) {
if (dig->coherent_mode)
args.v2.ucMisc |= PANEL_ENCODER_MISC_COHERENT;
}
if (hdmi_detected)
args.v2.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v2.ucTruncate = 0;
args.v2.ucSpatial = 0;
args.v2.ucTemporal = 0;
args.v2.ucFRC = 0;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (dig->lcd_misc & ATOM_PANEL_MISC_DUAL)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
if (dig->lcd_misc & ATOM_PANEL_MISC_SPATIAL) {
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
}
if (dig->lcd_misc & ATOM_PANEL_MISC_TEMPORAL) {
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
if (dig->lcd_misc & ATOM_PANEL_MISC_888RGB)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
if (((dig->lcd_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
}
} else {
if (dig->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
if (radeon_encoder->pixel_clock > 165000)
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
break;
default:
DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
break;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
 
/* dp bridges are always DP */
if (radeon_encoder_is_dp_bridge(encoder))
return ATOM_ENCODER_MODE_DP;
 
/* DVO is always DVO */
if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
return ATOM_ENCODER_MODE_DVO;
 
connector = radeon_get_connector_for_encoder(encoder);
/* if we don't have an active device yet, just use one of
* the connectors tied to the encoder.
675,1737 → 340,45
 
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
case DRM_MODE_CONNECTOR_HDMIB:
if (radeon_connector->use_digital) {
/* HDMI 1.3 supports up to 340 MHz over single link */
if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
if (pixel_clock > 340000)
return true;
else
return ATOM_ENCODER_MODE_HDMI;
} else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
return false;
} else {
if (pixel_clock > 165000)
return true;
else
return ATOM_ENCODER_MODE_CRT;
break;
return false;
}
} else
return false;
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_HDMI;
} else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
return ATOM_ENCODER_MODE_LVDS;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
dig_connector = radeon_connector->con_priv;
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
/* fix me */
if (ASIC_IS_DCE4(rdev))
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_HDMI;
} else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_eDP:
return ATOM_ENCODER_MODE_DP;
case DRM_MODE_CONNECTOR_DVIA:
case DRM_MODE_CONNECTOR_VGA:
return ATOM_ENCODER_MODE_CRT;
break;
case DRM_MODE_CONNECTOR_Composite:
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_9PinDIN:
/* fix me */
return ATOM_ENCODER_MODE_TV;
/*return ATOM_ENCODER_MODE_CV;*/
break;
}
}
 
/*
* DIG Encoder/Transmitter Setup
*
* DCE 3.0/3.1
* - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
* Supports up to 3 digital outputs
* - 2 DIG encoder blocks.
* DIG1 can drive UNIPHY link A or link B
* DIG2 can drive UNIPHY link B or LVTMA
*
* DCE 3.2
* - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
* Supports up to 5 digital outputs
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
* DCE 4.0/5.0
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 6 DIG encoder blocks.
* - DIG to PHY mapping is hardcoded
* DIG1 drives UNIPHY0 link A, A+B
* DIG2 drives UNIPHY0 link B
* DIG3 drives UNIPHY1 link A, A+B
* DIG4 drives UNIPHY1 link B
* DIG5 drives UNIPHY2 link A, A+B
* DIG6 drives UNIPHY2 link B
*
* DCE 4.1
* - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
* Supports up to 6 digital outputs
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
* crtc0 -> dig2 -> LVTMA links A+B -> TMDS/HDMI
* crtc1 -> dig1 -> UNIPHY0 link B -> DP
* crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
* crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
*/
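 
/* On DCE 4.0/5.0 the fixed mapping above reduces to a simple formula
* (a reading aid only, not code from this driver):
*
*   dig_encoder = 2 * uniphy_index + (link B ? 1 : 0)
*
* radeon_atom_pick_dig_encoder() below implements exactly this table.
*/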
 
union dig_encoder_control {
DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
DIG_ENCODER_CONTROL_PARAMETERS_V4 v4;
};
 
void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
union dig_encoder_control args;
int index = 0;
uint8_t frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int hpd_id = RADEON_HPD_NONE;
int bpc = 8;
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
hpd_id = radeon_connector->hpd.hpd;
bpc = connector->display_info.bpc;
}
 
/* no dig encoder assigned */
if (dig->dig_encoder == -1)
return;
 
memset(&args, 0, sizeof(args));
 
if (ASIC_IS_DCE4(rdev))
index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
else {
if (dig->dig_encoder)
index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
else
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
args.v1.ucAction = action;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
if (action == ATOM_ENCODER_CMD_SETUP_PANEL_MODE)
args.v3.ucPanelMode = panel_mode;
else
args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
(args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST))
args.v1.ucLaneNum = dp_lane_count;
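/* single link TMDS tops out at 165 MHz; anything faster needs the dual link (8 lane) path */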
else if (radeon_encoder->pixel_clock > 165000)
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
 
if (ASIC_IS_DCE5(rdev)) {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) ||
(args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP_MST)) {
if (dp_clock == 270000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V4_DPLINKRATE_5_40GHZ;
}
args.v4.acConfig.ucDigSel = dig->dig_encoder;
switch (bpc) {
case 0:
args.v4.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v4.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v4.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v4.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v4.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v4.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
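/* ATOM HPD ids are 1 based; 0 means no hotplug pin is assigned */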
if (hpd_id == RADEON_HPD_NONE)
args.v4.ucHPD_ID = 0;
else
args.v4.ucHPD_ID = hpd_id + 1;
} else if (ASIC_IS_DCE4(rdev)) {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
args.v3.acConfig.ucDigSel = dig->dig_encoder;
switch (bpc) {
case 0:
args.v3.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v3.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v3.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v3.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v3.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
} else {
if ((args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) && (dp_clock == 270000))
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
if (dig->linkb)
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
}
 
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V4 v4;
};
 
void
atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
struct drm_connector *connector;
union dig_transmitter_control args;
int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
int igp_lane_info = 0;
int dig_encoder = dig->dig_encoder;
 
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
connector = radeon_get_connector_for_encoder_init(encoder);
/* just needed to avoid bailing in the encoder check. the encoder
* isn't used for init
*/
dig_encoder = 0;
} else
connector = radeon_get_connector_for_encoder(encoder);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
igp_lane_info = dig_connector->igp_lane_info;
}
 
/* no dig encoder assigned */
if (dig_encoder == -1)
return;
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
 
memset(&args, 0, sizeof(args));
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
index = GetIndexIntoMasterTable(COMMAND, LVTMATransmitterControl);
break;
}
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
args.v1.usInitInfo = cpu_to_le16(connector_object_id);
} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
args.v1.asMode.ucLaneSel = lane_num;
args.v1.asMode.ucLaneSet = lane_set;
} else {
if (is_dp)
args.v1.usPixelClock =
cpu_to_le16(dp_clock / 10);
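/* above 165 MHz the clock is split in half across the two links of a dual link connection */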
else if (radeon_encoder->pixel_clock > 165000)
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
if (ASIC_IS_DCE4(rdev)) {
if (is_dp)
args.v3.ucLaneNum = dp_lane_count;
else if (radeon_encoder->pixel_clock > 165000)
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
 
if (dig->linkb)
args.v3.acConfig.ucLinkSel = 1;
if (dig_encoder & 1)
args.v3.acConfig.ucEncoderSel = 1;
 
/* Select the PLL for the PHY
* DP PHY should be clocked from external src if there is
* one.
*/
if (encoder->crtc) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
pll_id = radeon_crtc->pll_id;
}
 
if (ASIC_IS_DCE5(rdev)) {
/* On DCE5 DCPLL usually generates the DP ref clock */
if (is_dp) {
if (rdev->clock.dp_extclk)
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_EXTCLK;
else
args.v4.acConfig.ucRefClkSource = ENCODER_REFCLK_SRC_DCPLL;
} else
args.v4.acConfig.ucRefClkSource = pll_id;
} else {
/* On DCE4, if there is an external clock, it generates the DP ref clock */
if (is_dp && rdev->clock.dp_extclk)
args.v3.acConfig.ucRefClkSource = 2; /* external src */
else
args.v3.acConfig.ucRefClkSource = pll_id;
}
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v3.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v3.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v3.acConfig.ucTransmitterSel = 2;
break;
}
 
if (is_dp)
args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v3.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
args.v3.acConfig.fDualLinkConnector = 1;
}
} else if (ASIC_IS_DCE32(rdev)) {
args.v2.acConfig.ucEncoderSel = dig_encoder;
if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v2.acConfig.ucTransmitterSel = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v2.acConfig.ucTransmitterSel = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v2.acConfig.ucTransmitterSel = 2;
break;
}
 
if (is_dp) {
args.v2.acConfig.fCoherentMode = 1;
args.v2.acConfig.fDPConnector = 1;
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
if (radeon_encoder->pixel_clock > 165000)
args.v2.acConfig.fDualLinkConnector = 1;
}
} else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
 
if (dig_encoder)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
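/* igp_lane_info is a BIOS-provided mask of which PCIE lanes feed this
* transmitter: a single link uses a group of 4 lanes, dual link uses 8 */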
if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
if (igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (igp_lane_info & 0x2)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
else if (igp_lane_info & 0x4)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
else if (igp_lane_info & 0x8)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
} else {
if (igp_lane_info & 0x3)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
else if (igp_lane_info & 0xc)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
}
 
if (dig->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
 
if (is_dp)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
if (radeon_encoder->pixel_clock > 165000)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
}
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
bool
atombios_set_edp_panel_power(struct drm_connector *connector, int action)
{
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_device *dev = radeon_connector->base.dev;
struct radeon_device *rdev = dev->dev_private;
union dig_transmitter_control args;
int index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
uint8_t frev, crev;
 
if (connector->connector_type != DRM_MODE_CONNECTOR_eDP)
goto done;
 
if (!ASIC_IS_DCE4(rdev))
goto done;
 
if ((action != ATOM_TRANSMITTER_ACTION_POWER_ON) &&
(action != ATOM_TRANSMITTER_ACTION_POWER_OFF))
goto done;
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
goto done;
 
memset(&args, 0, sizeof(args));
 
args.v1.ucAction = action;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* wait for the panel to power up */
if (action == ATOM_TRANSMITTER_ACTION_POWER_ON) {
int i;
 
for (i = 0; i < 300; i++) {
if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
return true;
mdelay(1);
}
return false;
}
done:
return true;
}
 
union external_encoder_control {
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION v1;
EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION_V3 v3;
};
 
static void
atombios_external_encoder_setup(struct drm_encoder *encoder,
struct drm_encoder *ext_encoder,
int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
union external_encoder_control args;
struct drm_connector *connector;
int index = GetIndexIntoMasterTable(COMMAND, ExternalEncoderControl);
u8 frev, crev;
int dp_clock = 0;
int dp_lane_count = 0;
int connector_object_id = 0;
u32 ext_enum = (ext_radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
int bpc = 8;
 
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
connector = radeon_get_connector_for_encoder_init(encoder);
else
connector = radeon_get_connector_for_encoder(encoder);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector =
radeon_connector->con_priv;
 
dp_clock = dig_connector->dp_clock;
dp_lane_count = dig_connector->dp_lane_count;
connector_object_id =
(radeon_connector->connector_object_id & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
bpc = connector->display_info.bpc;
}
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
/* no params on frev 1 */
break;
case 2:
switch (crev) {
case 1:
case 2:
args.v1.sDigEncoder.ucAction = action;
args.v1.sDigEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v1.sDigEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (args.v1.sDigEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
if (dp_clock == 270000)
args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v1.sDigEncoder.ucLaneNum = 8;
else
args.v1.sDigEncoder.ucLaneNum = 4;
break;
case 3:
args.v3.sExtEncoder.ucAction = action;
if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
else
args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
 
if (args.v3.sExtEncoder.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
if (dp_clock == 270000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_2_70GHZ;
else if (dp_clock == 540000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
} else if (radeon_encoder->pixel_clock > 165000)
args.v3.sExtEncoder.ucLaneNum = 8;
else
args.v3.sExtEncoder.ucLaneNum = 4;
switch (ext_enum) {
case GRAPH_OBJECT_ENUM_ID1:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER1;
break;
case GRAPH_OBJECT_ENUM_ID2:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER2;
break;
case GRAPH_OBJECT_ENUM_ID3:
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_ENCODER3;
break;
}
switch (bpc) {
case 0:
args.v3.sExtEncoder.ucBitPerColor = PANEL_BPC_UNDEFINE;
break;
case 6:
args.v3.sExtEncoder.ucBitPerColor = PANEL_6BIT_PER_COLOR;
break;
case 8:
default:
args.v3.sExtEncoder.ucBitPerColor = PANEL_8BIT_PER_COLOR;
break;
case 10:
args.v3.sExtEncoder.ucBitPerColor = PANEL_10BIT_PER_COLOR;
break;
case 12:
args.v3.sExtEncoder.ucBitPerColor = PANEL_12BIT_PER_COLOR;
break;
case 16:
args.v3.sExtEncoder.ucBitPerColor = PANEL_16BIT_PER_COLOR;
break;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
 
static void
atombios_yuv_setup(struct drm_encoder *encoder, bool enable)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
ENABLE_YUV_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableYUV);
uint32_t temp, reg;
 
memset(&args, 0, sizeof(args));
 
if (rdev->family >= CHIP_R600)
reg = R600_BIOS_3_SCRATCH;
else
reg = RADEON_BIOS_3_SCRATCH;
 
/* XXX: fix up scratch reg handling */
temp = RREG32(reg);
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
WREG32(reg, (ATOM_S3_TV1_ACTIVE |
(radeon_crtc->crtc_id << 18)));
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24)));
else
WREG32(reg, 0);
 
if (enable)
args.ucEnable = ATOM_ENABLE;
args.ucCRTC = radeon_crtc->crtc_id;
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
WREG32(reg, temp);
}
 
static void
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args;
int index = 0;
bool is_dig = false;
bool is_dce5_dac = false;
bool is_dce5_dvo = false;
 
memset(&args, 0, sizeof(args));
 
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
index = GetIndexIntoMasterTable(COMMAND, TMDSAOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
is_dig = true;
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
if (ASIC_IS_DCE5(rdev))
is_dce5_dvo = true;
else if (ASIC_IS_DCE3(rdev))
is_dig = true;
else
index = GetIndexIntoMasterTable(COMMAND, DVOOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, LCD1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, LVTMAOutputControl);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (ASIC_IS_DCE5(rdev))
is_dce5_dac = true;
else {
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl);
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl);
else
index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl);
break;
}
 
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
/* some early dce3.2 boards have a bug in their transmitter control table */
if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
else
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if (connector &&
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector =
radeon_connector->con_priv;
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_dig_connector->edp_on = true;
}
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
radeon_dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLON, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
if (connector &&
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *radeon_dig_connector =
radeon_connector->con_priv;
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_OFF);
radeon_dig_connector->edp_on = false;
}
}
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
break;
}
} else if (is_dce5_dac) {
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dac_setup(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dac_setup(encoder, ATOM_DISABLE);
break;
}
} else if (is_dce5_dvo) {
switch (mode) {
case DRM_MODE_DPMS_ON:
atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
}
} else {
switch (mode) {
case DRM_MODE_DPMS_ON:
args.ucAction = ATOM_ENABLE;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLON;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
args.ucAction = ATOM_DISABLE;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
args.ucAction = ATOM_LCD_BLOFF;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
break;
}
}
 
if (ext_encoder) {
switch (mode) {
case DRM_MODE_DPMS_ON:
default:
if (ASIC_IS_DCE41(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
if (ASIC_IS_DCE41(rdev)) {
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
} else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
break;
}
}
 
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
 
}
 
union crtc_source_param {
SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
 
static void
atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
union crtc_source_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return;
 
switch (frev) {
case 1:
switch (crev) {
case 1:
default:
if (ASIC_IS_AVIVO(rdev))
args.v1.ucCRTC = radeon_crtc->crtc_id;
else {
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) {
args.v1.ucCRTC = radeon_crtc->crtc_id;
} else {
args.v1.ucCRTC = radeon_crtc->crtc_id << 2;
}
}
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
args.v1.ucDevice = ATOM_DEVICE_DFP1_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT)
args.v1.ucDevice = ATOM_DEVICE_LCD1_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_DFP3_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v1.ucDevice = ATOM_DEVICE_DFP2_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX;
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX;
else
args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX;
break;
}
break;
case 2:
args.v2.ucCRTC = radeon_crtc->crtc_id;
args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder);
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = radeon_encoder->enc_priv;
switch (dig->dig_encoder) {
case 0:
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
break;
case 1:
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
break;
case 2:
args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
break;
case 3:
args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
break;
case 4:
args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
break;
case 5:
args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
break;
}
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID;
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT))
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID;
else
args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID;
break;
}
break;
}
break;
default:
DRM_ERROR("Unknown table version: %d, %d\n", frev, crev);
return;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
/* update scratch regs with new routing */
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
 
static void
atombios_apply_encoder_quirks(struct drm_encoder *encoder,
struct drm_display_mode *mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 
/* Funky macbooks */
if ((dev->pdev->device == 0x71C5) &&
(dev->pdev->subsystem_vendor == 0x106b) &&
(dev->pdev->subsystem_device == 0x0080)) {
if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) {
uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL);
 
lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_TRUNCATE_EN;
lvtma_bit_depth_control &= ~AVIVO_LVTMA_BIT_DEPTH_CONTROL_SPATIAL_DITHER_EN;
 
WREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL, lvtma_bit_depth_control);
}
}
 
/* set scaler clears this on some chips */
if (ASIC_IS_AVIVO(rdev) &&
(!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
if (ASIC_IS_DCE4(rdev)) {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
EVERGREEN_INTERLEAVE_EN);
else
WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
} else {
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
AVIVO_D1MODE_INTERLEAVE_EN);
else
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
}
}
}
 
static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
 
/* DCE4/5 */
if (ASIC_IS_DCE4(rdev)) {
dig = radeon_encoder->enc_priv;
if (ASIC_IS_DCE41(rdev))
return radeon_crtc->crtc_id;
else {
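/* DCE4/5: use the fixed DIG-to-PHY mapping described in the routing comment above */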
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
if (dig->linkb)
return 1;
else
return 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
if (dig->linkb)
return 3;
else
return 2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (dig->linkb)
return 5;
else
return 4;
break;
}
}
}
 
/* on DCE32 an encoder can drive any block, so just use the crtc id */
if (ASIC_IS_DCE32(rdev)) {
return radeon_crtc->crtc_id;
}
 
/* on DCE3 - LVTMA can only be driven by DIGB */
list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_test_encoder;
 
if (encoder == test_encoder)
continue;
 
if (!radeon_encoder_is_digital(test_encoder))
continue;
 
radeon_test_encoder = to_radeon_encoder(test_encoder);
dig = radeon_test_encoder->enc_priv;
 
if (dig->dig_encoder >= 0)
dig_enc_in_use |= (1 << dig->dig_encoder);
}
 
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA) {
if (dig_enc_in_use & 0x2)
DRM_ERROR("LVDS required digital encoder 2 but it was in use - stealing\n");
return 1;
}
if (!(dig_enc_in_use & 1))
return 0;
return 1;
}
 
/* This only needs to be called once at startup */
void
radeon_atom_encoder_init(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
struct drm_encoder *encoder;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
break;
default:
break;
}
 
if (ext_encoder && ASIC_IS_DCE41(rdev))
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT);
}
}
 
static void
radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
 
radeon_encoder->pixel_clock = adjusted_mode->clock;
 
if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
else
atombios_yuv_setup(encoder, false);
}
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
if (ASIC_IS_DCE4(rdev)) {
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
/* setup and enable the encoder */
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
 
/* enable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
} else {
/* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
 
/* setup and enable the encoder and transmitter */
atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
atombios_dvo_setup(encoder, ATOM_ENABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_ENABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
atombios_tv_setup(encoder, ATOM_ENABLE);
else
atombios_tv_setup(encoder, ATOM_DISABLE);
}
break;
}
 
if (ext_encoder) {
if (ASIC_IS_DCE41(rdev))
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP);
else
atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
}
 
atombios_apply_encoder_quirks(encoder, adjusted_mode);
 
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
r600_hdmi_enable(encoder);
r600_hdmi_setmode(encoder, adjusted_mode);
}
}
 
static bool
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT |
ATOM_DEVICE_CV_SUPPORT |
ATOM_DEVICE_CRT_SUPPORT)) {
DAC_LOAD_DETECTION_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, DAC_LoadDetection);
uint8_t frev, crev;
 
memset(&args, 0, sizeof(args));
 
if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
return false;
 
args.sDacload.ucMisc = 0;
 
if ((radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) ||
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1))
args.sDacload.ucDacType = ATOM_DAC_A;
else
args.sDacload.ucDacType = ATOM_DAC_B;
 
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT);
else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT);
else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT);
if (crev >= 3)
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT);
if (crev >= 3)
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb;
}
 
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
return true;
} else
return false;
}
 
static enum drm_connector_status
radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
uint32_t bios_0_scratch;
 
if (!atombios_dac_load_detect(encoder, connector)) {
DRM_DEBUG_KMS("detect returned false \n");
return connector_status_unknown;
}
 
if (rdev->family >= CHIP_R600)
bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
else
bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH);
 
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
 
static enum drm_connector_status
radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
u32 bios_0_scratch;
 
if (!ASIC_IS_DCE4(rdev))
return connector_status_unknown;
 
if (!ext_encoder)
return connector_status_unknown;
 
if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
return connector_status_unknown;
 
/* load detect on the dp bridge */
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
 
bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
 
DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT1_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
if (bios_0_scratch & ATOM_S0_CRT2_MASK)
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
return connector_status_connected;
}
if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
return connector_status_connected; /* CTV */
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
return connector_status_connected; /* STV */
}
return connector_status_disconnected;
}
 
void
radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
{
struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
 
if (ext_encoder)
/* ddc_setup on the dp bridge */
atombios_external_encoder_setup(encoder, ext_encoder,
EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
 
}
 
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
if ((radeon_encoder->active_device &
(ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
radeon_encoder_is_dp_bridge(encoder)) {
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
if (dig)
dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
}
 
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
if (connector) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
 
/* select the clock/data port if it uses a router */
if (radeon_connector->router.cd_valid)
radeon_router_select_cd_port(radeon_connector);
 
/* turn eDP panel on for mode set */
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
atombios_set_edp_panel_power(connector,
ATOM_TRANSMITTER_ACTION_POWER_ON);
}
 
/* this is needed for the pll/ss setup to work correctly in some cases */
atombios_set_encoder_crtc_source(encoder);
}
 
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
{
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
radeon_atom_output_lock(encoder, false);
}
 
static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig;
 
/* check for pre-DCE3 cards with shared encoders;
* can't really use the links individually, so don't disable
* the encoder if it's in use by another connector
*/
if (!ASIC_IS_DCE3(rdev)) {
struct drm_encoder *other_encoder;
struct radeon_encoder *other_radeon_encoder;
 
list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
other_radeon_encoder = to_radeon_encoder(other_encoder);
if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
drm_helper_encoder_in_use(other_encoder))
goto disable_done;
}
}
 
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
if (ASIC_IS_DCE4(rdev))
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
else {
/* disable the encoder and transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0);
}
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
atombios_dvo_setup(encoder, ATOM_DISABLE);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
atombios_dac_setup(encoder, ATOM_DISABLE);
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
atombios_tv_setup(encoder, ATOM_DISABLE);
break;
}
 
disable_done:
if (radeon_encoder_is_digital(encoder)) {
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
r600_hdmi_disable(encoder);
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
radeon_encoder->active_device = 0;
}
 
/* these are handled by the primary encoders */
static void radeon_atom_ext_prepare(struct drm_encoder *encoder)
{
 
}
 
static void radeon_atom_ext_commit(struct drm_encoder *encoder)
{
 
}
 
static void
radeon_atom_ext_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
 
}
 
static void radeon_atom_ext_disable(struct drm_encoder *encoder)
{
 
}
 
static void
radeon_atom_ext_dpms(struct drm_encoder *encoder, int mode)
{
 
}
 
static bool radeon_atom_ext_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
 
static const struct drm_encoder_helper_funcs radeon_atom_ext_helper_funcs = {
.dpms = radeon_atom_ext_dpms,
.mode_fixup = radeon_atom_ext_mode_fixup,
.prepare = radeon_atom_ext_prepare,
.mode_set = radeon_atom_ext_mode_set,
.commit = radeon_atom_ext_commit,
.disable = radeon_atom_ext_disable,
/* no detect for TMDS/LVDS yet */
};
 
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
.prepare = radeon_atom_encoder_prepare,
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.disable = radeon_atom_encoder_disable,
.detect = radeon_atom_dig_detect,
};
 
static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
.dpms = radeon_atom_encoder_dpms,
.mode_fixup = radeon_atom_mode_fixup,
.prepare = radeon_atom_encoder_prepare,
.mode_set = radeon_atom_encoder_mode_set,
.commit = radeon_atom_encoder_commit,
.detect = radeon_atom_dac_detect,
};
 
void radeon_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
}
 
static const struct drm_encoder_funcs radeon_atom_enc_funcs = {
.destroy = radeon_enc_destroy,
};
 
struct radeon_encoder_atom_dac *
radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL);
 
if (!dac)
return NULL;
 
dac->tv_std = radeon_atombios_get_tv_info(rdev);
return dac;
}
 
struct radeon_encoder_atom_dig *
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder)
{
int encoder_enum = (radeon_encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;
struct radeon_encoder_atom_dig *dig = kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
 
if (!dig)
return NULL;
 
/* coherent mode by default */
dig->coherent_mode = true;
dig->dig_encoder = -1;
 
if (encoder_enum == 2)
dig->linkb = true;
else
dig->linkb = false;
 
return dig;
}
 
void
radeon_add_atom_encoder(struct drm_device *dev,
uint32_t encoder_enum,
uint32_t supported_device,
u16 caps)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_encoder *encoder;
struct radeon_encoder *radeon_encoder;
 
/* see if we already added it */
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->encoder_enum == encoder_enum) {
radeon_encoder->devices |= supported_device;
return;
}
 
}
 
/* add a new one */
radeon_encoder = kzalloc(sizeof(struct radeon_encoder), GFP_KERNEL);
if (!radeon_encoder)
return;
 
encoder = &radeon_encoder->base;
switch (rdev->num_crtc) {
case 1:
encoder->possible_crtcs = 0x1;
break;
case 2:
default:
encoder->possible_crtcs = 0x3;
break;
case 4:
encoder->possible_crtcs = 0xf;
break;
case 6:
encoder->possible_crtcs = 0x3f;
break;
}
 
radeon_encoder->enc_priv = NULL;
 
radeon_encoder->encoder_enum = encoder_enum;
radeon_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
radeon_encoder->devices = supported_device;
radeon_encoder->rmx_type = RMX_OFF;
radeon_encoder->underscan_type = UNDERSCAN_OFF;
radeon_encoder->is_ext_encoder = false;
radeon_encoder->caps = caps;
 
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_LVDS:
case ENCODER_OBJECT_ID_INTERNAL_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC1:
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DAC2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC);
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder);
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs);
break;
case ENCODER_OBJECT_ID_INTERNAL_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
case ENCODER_OBJECT_ID_INTERNAL_DDI:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
radeon_encoder->rmx_type = RMX_FULL;
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder);
} else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
} else {
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder);
}
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
case ENCODER_OBJECT_ID_SI170B:
case ENCODER_OBJECT_ID_CH7303:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOA:
case ENCODER_OBJECT_ID_EXTERNAL_SDVOB:
case ENCODER_OBJECT_ID_TITFP513:
case ENCODER_OBJECT_ID_VT1623:
case ENCODER_OBJECT_ID_HDMI_SI1930:
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
/* these are handled by the primary encoders */
radeon_encoder->is_ext_encoder = true;
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS);
else if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT))
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_DAC);
else
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &radeon_atom_ext_helper_funcs);
break;
}
}
/drivers/video/drm/radeon/radeon_family.h
87,6 → 87,10
CHIP_TURKS,
CHIP_CAICOS,
CHIP_CAYMAN,
CHIP_ARUBA,
CHIP_TAHITI,
CHIP_PITCAIRN,
CHIP_VERDE,
CHIP_LAST,
};
 
/drivers/video/drm/radeon/radeon_fb.c
27,20 → 27,17
#include <linux/slab.h>
#include <linux/fb.h>
 
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
 
#include "drm_fb_helper.h"
#include <drm/drm_fb_helper.h>
 
#include <drm_mm.h>
#include "radeon_object.h"
 
int radeonfb_create_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd *mode_cmd,
int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p);
 
/* object hierarchy -
55,7 → 52,7
};
 
static struct fb_ops radeonfb_ops = {
// .owner = THIS_MODULE,
.owner = THIS_MODULE,
.fb_check_var = drm_fb_helper_check_var,
.fb_set_par = drm_fb_helper_set_par,
// .fb_fillrect = cfb_fillrect,
98,7 → 95,7
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
struct drm_mode_fb_cmd mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
114,15 → 111,21
if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
sizes->surface_bpp = 32;
 
mode_cmd.bpp = sizes->surface_bpp;
mode_cmd.depth = sizes->surface_depth;
mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
sizes->surface_depth);
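/* drm_mode_legacy_fb_format() translates the old bpp/depth pair into a fourcc pixel format */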
 
ret = radeonfb_create_object(rfbdev, &mode_cmd, &gobj);
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
if (ret) {
DRM_ERROR("failed to create fbcon object %d\n", ret);
return ret;
}
 
rbo = gem_to_radeon_bo(gobj);
 
/* okay we have an object now allocate the framebuffer */
info = framebuffer_alloc(0, device);
if (info == NULL) {
dbgprintf("framebuffer_alloc\n");
ret = -ENOMEM;
goto out_unref;
}
129,7 → 132,11
 
info->par = rfbdev;
 
radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
DRM_ERROR("failed to initalise framebuffer %d\n", ret);
goto out_unref;
}
 
fb = &rfbdev->rfb.base;
 
141,7 → 148,7
 
strcpy(info->fix.id, "radeondrmfb");
 
drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
 
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
163,21 → 170,17
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
 
// info->pixmap.size = 64*1024;
// info->pixmap.buf_align = 8;
// info->pixmap.access_align = 32;
// info->pixmap.flags = FB_PIXMAP_SYSTEM;
// info->pixmap.scan_align = 1;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
 
if (info->screen_base == NULL) {
ret = -ENOSPC;
goto out_unref;
}
// if (info->screen_base == NULL) {
// ret = -ENOSPC;
// goto out_unref;
// }
DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start);
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
DRM_INFO(" pitch is %d\n", fb->pitch);
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
 
 
LEAVE();
/drivers/video/drm/radeon/radeon_fence.c
34,305 → 34,608
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
 
static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
/*
* Fences
* Fences mark an event in the GPUs pipeline and are used
* for GPU/CPU synchronization. When the fence is written,
* it is expected that all buffers associated with that fence
* are no longer in use by the associated ring on the GPU and
* that the relevant GPU caches have been flushed. Whether
* we use a scratch register or memory location depends on the asic
* and whether writeback is enabled.
*/
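 
/* Typical call sequence (an illustrative sketch only; assumes the
* multi-ring API introduced in this revision):
*
*	struct radeon_fence *fence;
*
*	radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
*	... submit the command stream referencing the fenced buffers ...
*	radeon_fence_wait(fence, false);	// block until the GPU passes the fence
*	radeon_fence_unref(&fence);
*/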
 
/**
* radeon_fence_write - write a fence value
*
* @rdev: radeon_device pointer
* @seq: sequence number to write
* @ring: ring index the fence is associated with
*
* Writes a fence value to memory or a scratch register (all asics).
*/
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
if (rdev->wb.enabled) {
u32 scratch_index;
if (rdev->wb.use_event)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
} else
WREG32(rdev->fence_drv.scratch_reg, seq);
struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
*drv->cpu_addr = cpu_to_le32(seq);
} else {
WREG32(drv->scratch_reg, seq);
}
}
 
static u32 radeon_fence_read(struct radeon_device *rdev)
/**
* radeon_fence_read - read a fence value
*
* @rdev: radeon_device pointer
* @ring: ring index the fence is associated with
*
* Reads a fence value from memory or a scratch register (all asics).
* Returns the value of the fence read from memory or register.
*/
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
u32 seq;
struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
u32 seq = 0;
 
if (rdev->wb.enabled) {
u32 scratch_index;
if (rdev->wb.use_event)
scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
else
scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
} else
seq = RREG32(rdev->fence_drv.scratch_reg);
if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
seq = le32_to_cpu(*drv->cpu_addr);
} else {
seq = RREG32(drv->scratch_reg);
}
return seq;
}
 
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
/**
* radeon_fence_emit - emit a fence on the requested ring
*
* @rdev: radeon_device pointer
* @fence: radeon fence object
* @ring: ring index the fence is associated with
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int radeon_fence_emit(struct radeon_device *rdev,
struct radeon_fence **fence,
int ring)
{
unsigned long irq_flags;
 
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (fence->emited) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
/* we are protected by the ring emission mutex */
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
(*fence)->ring = ring;
radeon_fence_ring_emit(rdev, ring, *fence);
// trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
return 0;
}
fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
if (!rdev->cp.ready)
/* FIXME: cp is not running, assume everything is done right
* away
 
/**
* radeon_fence_process - process a fence
*
* @rdev: radeon_device pointer
* @ring: ring index the fence is associated with
*
* Checks the current fence value and wakes the fence queue
* if the sequence number has increased (all asics).
*/
radeon_fence_write(rdev, fence->seq);
else
radeon_fence_ring_emit(rdev, fence);
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
uint64_t seq, last_seq, last_emitted;
unsigned count_loop = 0;
bool wake = false;
 
// trace_radeon_fence_emit(rdev->ddev, fence->seq);
fence->emited = true;
list_move_tail(&fence->list, &rdev->fence_drv.emited);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
/* Note there is a scenario here for an infinite loop, but it's
* very unlikely to happen. For it to happen, the current polling
* process needs to be interrupted by another process, and that
* other process needs to update last_seq between the atomic read
* and the xchg of the current process.
*
* Moreover, for this to turn into an infinite loop, new fences
* need to be signaled continuously, i.e. radeon_fence_read needs
* to return a different value each time for both the currently
* polling process and the other process that exchanges last_seq
* between the atomic read and xchg of the current process. And
* the value the other process sets as last_seq must be higher
* than the seq value we just read, which means that the current
* process must be interrupted after radeon_fence_read and before
* the atomic xchg.
*
* To be even safer, we count the number of times we loop and
* bail out after 10 iterations, accepting the fact that we might
* have temporarily set last_seq not to the true last seq but to
* an older one.
*/
last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
do {
last_emitted = rdev->fence_drv[ring].sync_seq[ring];
seq = radeon_fence_read(rdev, ring);
seq |= last_seq & 0xffffffff00000000LL;
if (seq < last_seq) {
seq &= 0xffffffff;
seq |= last_emitted & 0xffffffff00000000LL;
}
 
static bool radeon_fence_poll_locked(struct radeon_device *rdev)
if (seq <= last_seq || seq > last_emitted) {
break;
}
/* If we loop over, we don't want to return without
* checking if a fence is signaled, as it means that the
* seq we just read is different from the previous one.
*/
wake = true;
last_seq = seq;
if ((count_loop++) > 10) {
/* We looped too many times; bail out, accepting
* that we might have set an older fence seq than
* the current real last seq signaled by the hw.
*/
break;
}
} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);
 
if (wake) {
rdev->fence_drv[ring].last_activity = GetTimerTicks();
wake_up_all(&rdev->fence_queue);
}
}
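/* Worked example of the 32->64 bit extension above (illustrative): the hw
* only reports the low 32 bits, so with last_seq = 0x00000001fffffff0 and a
* hw read of 0x00000005, the first guess 0x0000000100000005 is < last_seq;
* the wrap is repaired by taking the upper bits from last_emitted instead,
* e.g. last_emitted = 0x0000000200000010 yields seq = 0x0000000200000005. */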
 
/**
* radeon_fence_destroy - destroy a fence
*
* @kref: fence kref
*
* Frees the fence object (all asics).
*/
static void radeon_fence_destroy(struct kref *kref)
{
struct radeon_fence *fence;
struct list_head *i, *n;
uint32_t seq;
bool wake = false;
unsigned long cjiffies;
 
seq = radeon_fence_read(rdev);
if (seq != rdev->fence_drv.last_seq) {
rdev->fence_drv.last_seq = seq;
rdev->fence_drv.last_jiffies = GetTimerTicks();
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = GetTimerTicks();
if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
cjiffies -= rdev->fence_drv.last_jiffies;
if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
/* update the timeout */
rdev->fence_drv.last_timeout -= cjiffies;
} else {
/* the 500ms timeout is elapsed we should test
* for GPU lockup
*/
rdev->fence_drv.last_timeout = 1;
fence = container_of(kref, struct radeon_fence, kref);
kfree(fence);
}
} else {
/* wrap around update last jiffies, we will just wait
* a little longer
 
/**
* radeon_fence_seq_signaled - check if a fence sequence number has signaled
*
* @rdev: radeon device pointer
* @seq: sequence number
* @ring: ring index the fence is associated with
*
* Check if the last signaled fence sequence number is >= the requested
* sequence number (all asics).
* Returns true if the fence has signaled (current fence value
* is >= requested value) or false if it has not (current fence
* value is < the requested value). Helper function for
* radeon_fence_signaled().
*/
rdev->fence_drv.last_jiffies = cjiffies;
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
u64 seq, unsigned ring)
{
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
}
/* poll new last sequence at least once */
radeon_fence_process(rdev, ring);
if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
return true;
}
return false;
}
n = NULL;
list_for_each(i, &rdev->fence_drv.emited) {
fence = list_entry(i, struct radeon_fence, list);
if (fence->seq == seq) {
n = i;
break;
 
/**
* radeon_fence_signaled - check if a fence has signaled
*
* @fence: radeon fence object
*
* Check if the requested fence has signaled (all asics).
* Returns true if the fence has signaled or false if it has not.
*/
bool radeon_fence_signaled(struct radeon_fence *fence)
{
if (!fence) {
return true;
}
if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
return true;
}
/* all fence previous to this one are considered as signaled */
if (n) {
kevent_t event;
event.code = -1;
i = n;
do {
n = i->prev;
list_move_tail(i, &rdev->fence_drv.signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
// dbgprintf("fence %x done\n", fence);
RaiseEvent(fence->evnt, 0, &event);
i = n;
} while (i != &rdev->fence_drv.emited);
wake = true;
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return true;
}
return wake;
return false;
}
 
 
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
/**
* radeon_fence_wait_seq - wait for a specific sequence number
*
* @rdev: radeon device pointer
* @target_seq: sequence number we want to wait for
* @ring: ring index the fence is associated with
* @intr: use interruptible sleep
* @lock_ring: whether the ring should be locked or not
*
* Wait for the requested sequence number to be written (all asics).
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
* for radeon_fence_wait(), et al.
* Returns 0 if the sequence number has passed, error for all other cases.
* -EDEADLK is returned when a GPU lockup has been detected and the ring is
* marked as not ready so no further jobs get scheduled until a successful
* reset.
*/
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
unsigned ring, bool intr, bool lock_ring)
{
unsigned long irq_flags;
unsigned long timeout, last_activity;
uint64_t seq;
unsigned i;
bool signaled;
int r;
 
*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
if ((*fence) == NULL) {
return -ENOMEM;
while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
if (!rdev->ring[ring].ready) {
return -EBUSY;
}
 
(*fence)->evnt = CreateEvent(NULL, MANUAL_DESTROY);
// kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
(*fence)->emited = false;
(*fence)->signaled = false;
(*fence)->seq = 0;
INIT_LIST_HEAD(&(*fence)->list);
timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
/* the normal case, timeout is somewhere before last_activity */
timeout = rdev->fence_drv[ring].last_activity - timeout;
} else {
/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
* either way we will just wait for the minimum amount and then check for a lockup
*/
timeout = 1;
}
seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
/* Save current last activity value, used to check for GPU lockups */
last_activity = rdev->fence_drv[ring].last_activity;
 
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
// trace_radeon_fence_wait_begin(rdev->ddev, seq);
radeon_irq_kms_sw_irq_get(rdev, ring);
// if (intr) {
// r = wait_event_interruptible_timeout(rdev->fence_queue,
// (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
// timeout);
// } else {
// r = wait_event_timeout(rdev->fence_queue,
// (signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
// timeout);
// }
delay(1);
 
radeon_irq_kms_sw_irq_put(rdev, ring);
// if (unlikely(r < 0)) {
// return r;
// }
// trace_radeon_fence_wait_end(rdev->ddev, seq);
 
if (unlikely(!signaled)) {
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
if (r) {
continue;
}
 
/* check if sequence value has changed since last_activity */
if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
continue;
}
 
bool radeon_fence_signaled(struct radeon_fence *fence)
{
unsigned long irq_flags;
bool signaled = false;
if (lock_ring) {
mutex_lock(&rdev->ring_lock);
}
 
if (!fence)
return true;
/* test if somebody else has already decided that this is a lockup */
if (last_activity != rdev->fence_drv[ring].last_activity) {
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
}
continue;
}
 
if (fence->rdev->gpu_lockup)
return true;
if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
/* good news we believe it's a lockup */
dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
target_seq, seq);
 
write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
signaled = fence->signaled;
/* if we are shutting down, report all fences as signaled */
if (fence->rdev->shutdown) {
signaled = true;
/* change last activity so nobody else thinks there is a lockup */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
rdev->fence_drv[i].last_activity = jiffies;
}
if (!fence->emited) {
WARN(1, "Querying an unemited fence : %p !\n", fence);
signaled = true;
 
/* mark the ring as not ready any more */
rdev->ring[ring].ready = false;
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
}
if (!signaled) {
radeon_fence_poll_locked(fence->rdev);
signaled = fence->signaled;
return -EDEADLK;
}
write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
return signaled;
 
if (lock_ring) {
mutex_unlock(&rdev->ring_lock);
}
}
}
return 0;
}
 
/**
* radeon_fence_wait - wait for a fence to signal
*
* @fence: radeon fence object
* @intr: use interruptible sleep
*
* Wait for the requested fence to signal (all asics).
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the fence.
* Returns 0 if the fence has passed, error for all other cases.
*/
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
struct radeon_device *rdev;
unsigned long irq_flags, timeout;
u32 seq;
int r;
 
if (fence == NULL) {
WARN(1, "Querying an invalid fence : %p !\n", fence);
return 0;
return -EINVAL;
}
rdev = fence->rdev;
if (radeon_fence_signaled(fence)) {
 
r = radeon_fence_wait_seq(fence->rdev, fence->seq,
fence->ring, intr, true);
if (r) {
return r;
}
fence->seq = RADEON_FENCE_SIGNALED_SEQ;
return 0;
}
timeout = rdev->fence_drv.last_timeout;
retry:
/* save current sequence used to check for GPU lockup */
seq = rdev->fence_drv.last_seq;
// trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
radeon_irq_kms_sw_irq_get(rdev);
// r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
// radeon_fence_signaled(fence), timeout);
 
WaitEvent(fence->evnt);
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
unsigned i;
 
radeon_irq_kms_sw_irq_put(rdev);
if (unlikely(r < 0)) {
return r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
return true;
}
}
return false;
}
 
/**
* radeon_fence_wait_any_seq - wait for a sequence number on any ring
*
* @rdev: radeon device pointer
* @target_seq: sequence number(s) we want to wait for
* @intr: use interruptible sleep
*
* Wait for the requested sequence number(s) to be written by any ring
* (all asics). Sequence number array is indexed by ring id.
* @intr selects whether to use interruptible (true) or non-interruptible
* (false) sleep when waiting for the sequence number. Helper function
* for radeon_fence_wait_any(), et al.
* Returns 0 if the sequence number has passed, error for all other cases.
*/
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
u64 *target_seq, bool intr)
{
unsigned long timeout, last_activity, tmp;
unsigned i, ring = RADEON_NUM_RINGS;
bool signaled;
int r;
 
for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
if (!target_seq[i]) {
continue;
}
 
/* use the most recent one as indicator */
if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
last_activity = rdev->fence_drv[i].last_activity;
}
 
/* For lockup detection just pick the lowest ring we are
* actively waiting for
*/
if (i < ring) {
ring = i;
}
}
 
/* nothing to wait for ? */
if (ring == RADEON_NUM_RINGS) {
return -ENOENT;
}
 
while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
timeout = GetTimerTicks() - RADEON_FENCE_JIFFIES_TIMEOUT;
if (time_after(last_activity, timeout)) {
/* the normal case, timeout is somewhere before last_activity */
timeout = last_activity - timeout;
} else {
radeon_irq_kms_sw_irq_get(rdev);
// r = wait_event_timeout(rdev->fence_drv.queue,
// radeon_fence_signaled(fence), timeout);
/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
* either way we will just wait for the minimum amount and then check for a lockup
*/
timeout = 1;
}
 
WaitEvent(fence->evnt);
// trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (target_seq[i]) {
radeon_irq_kms_sw_irq_get(rdev, i);
}
}
 
radeon_irq_kms_sw_irq_put(rdev);
// WaitEvent(fence->evnt);
 
r = 1;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (target_seq[i]) {
radeon_irq_kms_sw_irq_put(rdev, i);
}
}
if (unlikely(r < 0)) {
return r;
}
// trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
/* we were interrupted for some reason and fence
* isn't signaled yet, resume wait
*/
 
if (unlikely(!signaled)) {
/* we were interrupted for some reason and fence
* isn't signaled yet, resume waiting */
if (r) {
timeout = r;
goto retry;
continue;
}
/* don't protect read access to rdev->fence_drv.last_seq
* if we are experiencing a lockup, the value doesn't change
*/
if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
 
mutex_lock(&rdev->ring_lock);
for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
tmp = rdev->fence_drv[i].last_activity;
}
}
/* test if somebody else has already decided that this is a lockup */
if (last_activity != tmp) {
last_activity = tmp;
mutex_unlock(&rdev->ring_lock);
continue;
}
 
if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
/* good news we believe it's a lockup */
WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
/* FIXME: what should we do ? marking everyone
* as signaled for now
*/
rdev->gpu_lockup = true;
// r = radeon_gpu_reset(rdev);
// if (r)
// return r;
return true;
dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
target_seq[ring]);
 
// radeon_fence_write(rdev, fence->seq);
// rdev->gpu_lockup = false;
/* change last activity so nobody else thinks there is a lockup */
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
rdev->fence_drv[i].last_activity = GetTimerTicks();
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
rdev->fence_drv.last_jiffies = GetTimerTicks();
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
goto retry;
 
/* mark the ring as not ready any more */
rdev->ring[ring].ready = false;
mutex_unlock(&rdev->ring_lock);
return -EDEADLK;
}
mutex_unlock(&rdev->ring_lock);
}
}
return 0;
}
 
#if 0
int radeon_fence_wait_next(struct radeon_device *rdev)
/**
* radeon_fence_wait_any - wait for a fence to signal on any ring
*
* @rdev: radeon device pointer
* @fences: radeon fence object(s)
* @intr: use interruptible sleep
*
* Wait for any requested fence to signal (all asics). Fence
* array is indexed by ring id. @intr selects whether to use
* interruptible (true) or non-interruptible (false) sleep when
* waiting for the fences. Used by the suballocator.
* Returns 0 if any fence has passed, error for all other cases.
*/
int radeon_fence_wait_any(struct radeon_device *rdev,
struct radeon_fence **fences,
bool intr)
{
unsigned long irq_flags;
struct radeon_fence *fence;
uint64_t seq[RADEON_NUM_RINGS];
unsigned i;
int r;
 
if (rdev->gpu_lockup) {
return 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
seq[i] = 0;
 
if (!fences[i]) {
continue;
}
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (list_empty(&rdev->fence_drv.emited)) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
 
if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
/* something was already signaled */
return 0;
}
fence = list_entry(rdev->fence_drv.emited.next,
struct radeon_fence, list);
radeon_fence_ref(fence);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
 
seq[i] = fences[i]->seq;
}
 
r = radeon_fence_wait_any_seq(rdev, seq, intr);
if (r) {
return r;
}
return 0;
}
 
int radeon_fence_wait_last(struct radeon_device *rdev)
/**
* radeon_fence_wait_next_locked - wait for the next fence to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
*
* Wait for the next fence on the requested ring to signal (all asics).
* Returns 0 if the next fence has passed, error for all other cases.
* Caller must hold ring lock.
*/
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
int r;
uint64_t seq;
 
if (rdev->gpu_lockup) {
return 0;
seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
if (seq >= rdev->fence_drv[ring].sync_seq[ring]) {
/* nothing to wait for, last_seq is
already the last emitted fence */
return -ENOENT;
}
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
if (list_empty(&rdev->fence_drv.emited)) {
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return 0;
return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}
fence = list_entry(rdev->fence_drv.emited.prev,
struct radeon_fence, list);
radeon_fence_ref(fence);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
 
/**
* radeon_fence_wait_empty_locked - wait for all fences to signal
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
*
* Wait for all fences on the requested ring to signal (all asics).
* Returns 0 if the fences have passed, error for all other cases.
* Caller must hold ring lock.
*/
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
uint64_t seq = rdev->fence_drv[ring].sync_seq[ring];
 
while(1) {
int r;
r = radeon_fence_wait_seq(rdev, seq, ring, false, false);
if (r == -EDEADLK) {
mutex_unlock(&rdev->ring_lock);
r = radeon_gpu_reset(rdev);
mutex_lock(&rdev->ring_lock);
if (!r)
continue;
}
if (r) {
dev_err(rdev->dev, "error waiting for ring to become"
" idle (%d)\n", r);
}
return;
}
}
 
/**
* radeon_fence_ref - take a ref on a fence
*
* @fence: radeon fence object
*
* Take a reference on a fence (all asics).
* Returns the fence.
*/
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
kref_get(&fence->kref);
339,58 → 642,230
return fence;
}
 
#endif
 
/**
* radeon_fence_unref - remove a ref on a fence
*
* @fence: radeon fence object
*
* Remove a reference on a fence (all asics).
*/
void radeon_fence_unref(struct radeon_fence **fence)
{
unsigned long irq_flags;
struct radeon_fence *tmp = *fence;
 
*fence = NULL;
if (tmp) {
kref_put(&tmp->kref, radeon_fence_destroy);
}
}
 
if(tmp)
/**
* radeon_fence_count_emitted - get the count of emitted fences
*
* @rdev: radeon device pointer
* @ring: ring index the fence is associated with
*
* Get the number of fences emitted on the requested ring (all asics).
* Returns the number of emitted fences on the ring. Used by the
* dynpm code to track ring activity.
*/
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
write_lock_irqsave(&tmp->rdev->fence_drv.lock, irq_flags);
list_del(&tmp->list);
tmp->emited = false;
write_unlock_irqrestore(&tmp->rdev->fence_drv.lock, irq_flags);
};
uint64_t emitted;
 
/* We are not protected by ring lock when reading the last sequence
* but it's ok to report slightly wrong fence count here.
*/
radeon_fence_process(rdev, ring);
emitted = rdev->fence_drv[ring].sync_seq[ring]
- atomic64_read(&rdev->fence_drv[ring].last_seq);
/* to avoid a 32-bit wrap-around */
if (emitted > 0x10000000) {
emitted = 0x10000000;
}
return (unsigned)emitted;
}
 
void radeon_fence_process(struct radeon_device *rdev)
/**
* radeon_fence_need_sync - do we need a semaphore
*
* @fence: radeon fence object
* @dst_ring: which ring to check against
*
* Check if the fence needs to be synced against another ring
* (all asics). If so, we need to emit a semaphore.
* Returns true if we need to sync with another ring, false if
* not.
*/
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
unsigned long irq_flags;
bool wake;
struct radeon_fence_driver *fdrv;
 
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
wake = radeon_fence_poll_locked(rdev);
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (!fence) {
return false;
}
 
int radeon_fence_driver_init(struct radeon_device *rdev)
if (fence->ring == dst_ring) {
return false;
}
 
/* we are protected by the ring mutex */
fdrv = &fence->rdev->fence_drv[dst_ring];
if (fence->seq <= fdrv->sync_seq[fence->ring]) {
return false;
}
 
return true;
}
 
/**
* radeon_fence_note_sync - record the sync point
*
* @fence: radeon fence object
* @dst_ring: which ring to check against
*
* Note the sequence number at which point the fence will
* be synced with the requested ring (all asics).
*/
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
unsigned long irq_flags;
struct radeon_fence_driver *dst, *src;
unsigned i;
 
if (!fence) {
return;
}
 
if (fence->ring == dst_ring) {
return;
}
 
/* we are protected by the ring mutex */
src = &fence->rdev->fence_drv[fence->ring];
dst = &fence->rdev->fence_drv[dst_ring];
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (i == dst_ring) {
continue;
}
dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
}
}
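/* Typical pairing (sketch; the ring mutex is held and the semaphore
* emission itself is asic-specific, see radeon_vm_bo_update_pte() below
* for a real user):
*
* if (radeon_fence_need_sync(fence, dst_ring)) {
* // emit a semaphore so dst_ring waits on fence->ring first
* radeon_fence_note_sync(fence, dst_ring);
* }
*/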
 
/**
* radeon_fence_driver_start_ring - make the fence driver
* ready for use on the requested ring.
*
* @rdev: radeon device pointer
* @ring: ring index to start the fence driver on
*
* Make the fence driver ready for processing (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has.
* Returns 0 for success, errors for failure.
*/
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
uint64_t index;
int r;
 
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
if (rdev->wb.use_event) {
rdev->fence_drv[ring].scratch_reg = 0;
index = R600_WB_EVENT_OFFSET + ring * 4;
} else {
r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
if (r) {
dev_err(rdev->dev, "fence failed to get scratch register\n");
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
return r;
}
radeon_fence_write(rdev, 0);
atomic_set(&rdev->fence_drv.seq, 0);
INIT_LIST_HEAD(&rdev->fence_drv.created);
INIT_LIST_HEAD(&rdev->fence_drv.emited);
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
// init_waitqueue_head(&rdev->fence_drv.queue);
rdev->fence_drv.initialized = true;
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
index = RADEON_WB_SCRATCH_OFFSET +
rdev->fence_drv[ring].scratch_reg -
rdev->scratch.reg_base;
}
rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
rdev->fence_drv[ring].initialized = true;
dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
return 0;
}
 
/**
* radeon_fence_driver_init_ring - init the fence driver
* for the requested ring.
*
* @rdev: radeon device pointer
* @ring: ring index to start the fence driver on
*
* Init the fence driver for the requested ring (all asics).
* Helper function for radeon_fence_driver_init().
*/
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
int i;
 
rdev->fence_drv[ring].scratch_reg = -1;
rdev->fence_drv[ring].cpu_addr = NULL;
rdev->fence_drv[ring].gpu_addr = 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
rdev->fence_drv[ring].sync_seq[i] = 0;
atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
rdev->fence_drv[ring].last_activity = jiffies;
rdev->fence_drv[ring].initialized = false;
}
 
/**
* radeon_fence_driver_init - init the fence driver
* for all possible rings.
*
* @rdev: radeon device pointer
*
* Init the fence driver for all possible rings (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has using
* radeon_fence_driver_start_ring().
* Returns 0 for success.
*/
int radeon_fence_driver_init(struct radeon_device *rdev)
{
int ring;
 
init_waitqueue_head(&rdev->fence_queue);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
radeon_fence_driver_init_ring(rdev, ring);
}
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
return 0;
}
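/* Expected bring-up order (sketch based on the helpers above): the device
* init path calls radeon_fence_driver_init() once, then each asic's startup
* code calls radeon_fence_driver_start_ring() for every ring it has:
*
* r = radeon_fence_driver_init(rdev);
* ...
* r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
*/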
 
/**
* radeon_fence_driver_fini - tear down the fence driver
* for all possible rings.
*
* @rdev: radeon device pointer
*
* Tear down the fence driver for all possible rings (all asics).
*/
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
int ring;
 
mutex_lock(&rdev->ring_lock);
for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
if (!rdev->fence_drv[ring].initialized)
continue;
radeon_fence_wait_empty_locked(rdev, ring);
wake_up_all(&rdev->fence_queue);
radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
rdev->fence_drv[ring].initialized = false;
}
mutex_unlock(&rdev->ring_lock);
}
 
 
/*
* Fence debugfs
*/
400,16 → 875,24
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_fence *fence;
int i, j;
 
seq_printf(m, "Last signaled fence 0x%08X\n",
radeon_fence_read(rdev));
if (!list_empty(&rdev->fence_drv.emited)) {
fence = list_entry(rdev->fence_drv.emited.prev,
struct radeon_fence, list);
seq_printf(m, "Last emited fence %p with 0x%08X\n",
fence, fence->seq);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!rdev->fence_drv[i].initialized)
continue;
 
seq_printf(m, "--- ring %d ---\n", i);
seq_printf(m, "Last signaled fence 0x%016llx\n",
(unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
seq_printf(m, "Last emitted 0x%016llx\n",
rdev->fence_drv[i].sync_seq[i]);
 
for (j = 0; j < RADEON_NUM_RINGS; ++j) {
if (i != j && rdev->fence_drv[j].initialized)
seq_printf(m, "Last sync to ring %d 0x%016llx\n",
j, rdev->fence_drv[i].sync_seq[j]);
}
}
return 0;
}
 
/drivers/video/drm/radeon/radeon_gart.c
25,8 → 25,8
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_reg.h"
 
43,8 → 43,38
}
 
/*
* GART
* The GART (Graphics Aperture Remapping Table) is an aperture
* in the GPU's address space. System pages can be mapped into
* the aperture and look like contiguous pages from the GPU's
* perspective. A page table maps the pages in the aperture
* to the actual backing pages in system memory.
*
* Radeon GPUs support both an internal GART, as described above,
* and AGP. AGP works similarly, but the GART table is configured
* and maintained by the northbridge rather than the driver.
* Radeon hw has a separate AGP aperture that is programmed to
* point to the AGP aperture provided by the northbridge and the
* requests are passed through to the northbridge aperture.
* Both AGP and internal GART can be used at the same time, however
* that is not currently supported by the driver.
*
* This file handles the common internal GART management.
*/
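/* Illustrative address math (assuming 4 KiB GPU pages): a GPU address in
* the aperture selects gart entry
*
* t = (gpu_addr - gart_start) / RADEON_GPU_PAGE_SIZE;
*
* and each CPU page covers PAGE_SIZE / RADEON_GPU_PAGE_SIZE consecutive
* entries, which is why the bind/unbind loops below advance t by that
* ratio for every CPU page index p. */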
 
/*
* Common GART table functions.
*/
/**
* radeon_gart_table_ram_alloc - allocate system ram for gart page table
*
* @rdev: radeon_device pointer
*
* Allocate system memory for GART page table
* (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
* gart table to be in system memory.
* Returns 0 for success, -ENOMEM for failure.
*/
int radeon_gart_table_ram_alloc(struct radeon_device *rdev)
{
void *ptr;
61,38 → 91,54
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
rdev->gart.table.ram.ptr = ptr;
memset((void *)rdev->gart.table.ram.ptr, 0, rdev->gart.table_size);
rdev->gart.ptr = ptr;
memset((void *)rdev->gart.ptr, 0, rdev->gart.table_size);
return 0;
}
 
/**
* radeon_gart_table_ram_free - free system ram for gart page table
*
* @rdev: radeon_device pointer
*
* Free system memory for GART page table
* (r1xx-r3xx, non-pcie r4xx, rs400). These asics require the
* gart table to be in system memory.
*/
void radeon_gart_table_ram_free(struct radeon_device *rdev)
{
if (rdev->gart.table.ram.ptr == NULL) {
if (rdev->gart.ptr == NULL) {
return;
}
#ifdef CONFIG_X86
if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480 ||
rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
set_memory_wb((unsigned long)rdev->gart.table.ram.ptr,
set_memory_wb((unsigned long)rdev->gart.ptr,
rdev->gart.table_size >> PAGE_SHIFT);
}
#endif
// pci_free_consistent(rdev->pdev, rdev->gart.table_size,
// (void *)rdev->gart.table.ram.ptr,
// rdev->gart.table_addr);
rdev->gart.table.ram.ptr = NULL;
rdev->gart.ptr = NULL;
rdev->gart.table_addr = 0;
}
 
/**
* radeon_gart_table_vram_alloc - allocate vram for gart page table
*
* @rdev: radeon_device pointer
*
* Allocate video memory for GART page table
* (pcie r4xx, r5xx+). These asics require the
* gart table to be in video memory.
* Returns 0 for success, error for failure.
*/
int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
{
int r;
 
if (rdev->gart.table.vram.robj == NULL) {
if (rdev->gart.robj == NULL) {
r = radeon_bo_create(rdev, rdev->gart.table_size,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->gart.table.vram.robj);
NULL, &rdev->gart.robj);
if (r) {
return r;
}
100,51 → 146,93
return 0;
}
 
/**
* radeon_gart_table_vram_pin - pin gart page table in vram
*
* @rdev: radeon_device pointer
*
* Pin the GART page table in vram so it will not be moved
* by the memory manager (pcie r4xx, r5xx+). These asics require the
* gart table to be in video memory.
* Returns 0 for success, error for failure.
*/
int radeon_gart_table_vram_pin(struct radeon_device *rdev)
{
uint64_t gpu_addr;
int r;
 
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
r = radeon_bo_reserve(rdev->gart.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->gart.table.vram.robj,
r = radeon_bo_pin(rdev->gart.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.robj);
return r;
}
r = radeon_bo_kmap(rdev->gart.table.vram.robj,
(void **)&rdev->gart.table.vram.ptr);
r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
if (r)
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
return r;
}
 
void radeon_gart_table_vram_free(struct radeon_device *rdev)
/**
* radeon_gart_table_vram_unpin - unpin gart page table in vram
*
* @rdev: radeon_device pointer
*
* Unpin the GART page table in vram (pcie r4xx, r5xx+).
* These asics require the gart table to be in video memory.
*/
void radeon_gart_table_vram_unpin(struct radeon_device *rdev)
{
int r;
 
if (rdev->gart.table.vram.robj == NULL) {
if (rdev->gart.robj == NULL) {
return;
}
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
r = radeon_bo_reserve(rdev->gart.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
radeon_bo_kunmap(rdev->gart.robj);
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.ptr = NULL;
}
radeon_bo_unref(&rdev->gart.table.vram.robj);
}
 
/**
* radeon_gart_table_vram_free - free gart page table vram
*
* @rdev: radeon_device pointer
*
* Free the video memory used for the GART page table
* (pcie r4xx, r5xx+). These asics require the gart table to
* be in video memory.
*/
void radeon_gart_table_vram_free(struct radeon_device *rdev)
{
if (rdev->gart.robj == NULL) {
return;
}
radeon_gart_table_vram_unpin(rdev);
radeon_bo_unref(&rdev->gart.robj);
}
 
 
 
/*
* Common gart functions.
*/
/**
* radeon_gart_unbind - unbind pages from the gart page table
*
* @rdev: radeon_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to unbind
*
* Unbinds the requested pages from the gart page table and
* replaces them with the dummy page (all asics).
*/
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
int pages)
{
154,7 → 242,7
u64 page_base;
 
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory to unitialized GART !\n");
WARN(1, "trying to unbind memory from uninitialized GART !\n");
return;
}
t = offset / RADEON_GPU_PAGE_SIZE;
167,7 → 255,9
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
if (rdev->gart.ptr) {
radeon_gart_set_page(rdev, t, page_base);
}
page_base += RADEON_GPU_PAGE_SIZE;
}
}
176,8 → 266,21
radeon_gart_tlb_flush(rdev);
}
 
/**
* radeon_gart_bind - bind pages into the gart page table
*
* @rdev: radeon_device pointer
* @offset: offset into the GPU's gart aperture
* @pages: number of pages to bind
* @pagelist: pages to bind
* @dma_addr: DMA addresses of pages
*
* Binds the requested pages to the gart page table
* (all asics).
* Returns 0 for success, -EINVAL for failure.
*/
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
int pages, u32_t *pagelist)
int pages, u32 *pagelist, dma_addr_t *dma_addr)
{
unsigned t;
unsigned p;
187,7 → 290,7
// dbgprintf("offset %x pages %d list %x\n",
// offset, pages, pagelist);
if (!rdev->gart.ready) {
WARN(1, "trying to bind memory to unitialized GART !\n");
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
}
t = offset / RADEON_GPU_PAGE_SIZE;
194,13 → 297,9
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
for (i = 0; i < pages; i++, p++) {
/* we need to support large memory configurations */
/* assume that unbind has already been called on the range */
 
rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
 
 
rdev->gart.pages[p] = pagelist[i];
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
radeon_gart_set_page(rdev, t, page_base);
207,16 → 306,28
page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
mb();
radeon_gart_tlb_flush(rdev);
return 0;
}
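/* Usage sketch (illustrative; offset, npages, pagelist and dma_addrs are
* hypothetical caller-provided values): pages are bound at an aperture
* offset and later replaced by the dummy page on unbind:
*
* r = radeon_gart_bind(rdev, offset, npages, pagelist, dma_addrs);
* ...
* radeon_gart_unbind(rdev, offset, npages);
*/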
 
/**
* radeon_gart_restore - bind all pages in the gart page table
*
* @rdev: radeon_device pointer
*
* Binds all pages in the gart page table (all asics).
* Used to rebuild the gart table on device startup or resume.
*/
void radeon_gart_restore(struct radeon_device *rdev)
{
int i, j, t;
u64 page_base;
 
if (!rdev->gart.ptr) {
return;
}
for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
page_base = rdev->gart.pages_addr[i];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
228,6 → 339,14
radeon_gart_tlb_flush(rdev);
}
 
/**
* radeon_gart_init - init the driver info for managing the gart
*
* @rdev: radeon_device pointer
*
* Allocate the dummy page and init the gart driver info (all asics).
* Returns 0 for success, error for failure.
*/
int radeon_gart_init(struct radeon_device *rdev)
{
int r, i;
249,14 → 368,13
DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
/* Allocate pages table */
rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
GFP_KERNEL);
rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
if (rdev->gart.pages == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
rdev->gart.num_cpu_pages, GFP_KERNEL);
rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
rdev->gart.num_cpu_pages);
if (rdev->gart.pages_addr == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
268,6 → 386,13
return 0;
}
 
/**
* radeon_gart_fini - tear down the driver info for managing the gart
*
* @rdev: radeon_device pointer
*
* Tear down the gart driver info and free the dummy page (all asics).
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
275,8 → 400,909
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
vfree(rdev->gart.pages);
vfree(rdev->gart.pages_addr);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
}
 
/*
* GPUVM
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
* for the entire GPU, there are multiple VM page tables active
* at any given time. The VM page tables can contain a mix of
* vram pages and system memory pages, and system memory pages
* can be mapped as snooped (cached system pages) or unsnooped
* (uncached system pages).
* Each VM has an ID associated with it and there is a page table
* associated with each VMID. When executing a command buffer,
* the kernel tells the ring what VMID to use for that command
* buffer. VMIDs are allocated dynamically as commands are submitted.
* The userspace drivers maintain their own address space and the kernel
* sets up their page tables accordingly when they submit their
* command buffers and a VMID is assigned.
* Cayman/Trinity support up to 8 active VMs at any given time;
* SI supports 16.
*/
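/* Illustrative VA decomposition (assuming RADEON_VM_BLOCK_SIZE = 9 and
* 4 KiB GPU pages):
*
* pfn = va / RADEON_GPU_PAGE_SIZE;
* pde_idx = pfn >> RADEON_VM_BLOCK_SIZE; // which page table
* pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1); // entry within that table
*
* With those values each page table maps RADEON_VM_PTE_COUNT = 512 GPU
* pages, i.e. 2 MiB of address space. */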
 
/*
* vm helpers
*
* TODO bind a default page at vm initialization for default address
*/
 
/**
* radeon_vm_num_pdes - return the number of page directory entries
*
* @rdev: radeon_device pointer
*
* Calculate the number of page directory entries (cayman+).
*/
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}
 
/**
* radeon_vm_directory_size - returns the size of the page directory in bytes
*
* @rdev: radeon_device pointer
*
* Calculate the size of the page directory in bytes (cayman+).
*/
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
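/* Worked example (assuming max_pfn = 1 << 20, i.e. a 4 GiB space with
* 4 KiB pages, and RADEON_VM_BLOCK_SIZE = 9): radeon_vm_num_pdes() returns
* (1 << 20) >> 9 = 2048, so the page directory occupies 2048 * 8 = 16 KiB. */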
 
/**
* radeon_vm_manager_init - init the vm manager
*
* @rdev: radeon_device pointer
*
* Init the vm manager (cayman+).
* Returns 0 for success, error for failure.
*/
int radeon_vm_manager_init(struct radeon_device *rdev)
{
struct radeon_vm *vm;
struct radeon_bo_va *bo_va;
int r;
unsigned size;
 
if (!rdev->vm_manager.enabled) {
/* allocate enough for 2 full VM pts */
size = radeon_vm_directory_size(rdev);
size += rdev->vm_manager.max_pfn * 8;
size *= 2;
r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
RADEON_GPU_PAGE_ALIGN(size),
RADEON_GEM_DOMAIN_VRAM);
if (r) {
dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
(rdev->vm_manager.max_pfn * 8) >> 10);
return r;
}
 
r = radeon_asic_vm_init(rdev);
if (r)
return r;
 
rdev->vm_manager.enabled = true;
 
r = radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
if (r)
return r;
}
 
/* restore page table */
list_for_each_entry(vm, &rdev->vm_manager.lru_vm, list) {
if (vm->page_directory == NULL)
continue;
 
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
}
return 0;
}
 
/**
* radeon_vm_free_pt - free the page table for a specific vm
*
* @rdev: radeon_device pointer
* @vm: vm to unbind
*
* Free the page table of a specific vm (cayman+).
*
* Global and local mutex must be locked!
*/
static void radeon_vm_free_pt(struct radeon_device *rdev,
struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va;
int i;
 
if (!vm->page_directory)
return;
 
list_del_init(&vm->list);
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
 
list_for_each_entry(bo_va, &vm->va, vm_list) {
bo_va->valid = false;
}
 
if (vm->page_tables == NULL)
return;
 
for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
radeon_sa_bo_free(rdev, &vm->page_tables[i], vm->fence);
 
kfree(vm->page_tables);
}
 
/**
* radeon_vm_manager_fini - tear down the vm manager
*
* @rdev: radeon_device pointer
*
* Tear down the VM manager (cayman+).
*/
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
struct radeon_vm *vm, *tmp;
int i;
 
if (!rdev->vm_manager.enabled)
return;
 
mutex_lock(&rdev->vm_manager.lock);
/* free all allocated page tables */
list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
mutex_lock(&vm->mutex);
radeon_vm_free_pt(rdev, vm);
mutex_unlock(&vm->mutex);
}
for (i = 0; i < RADEON_NUM_VM; ++i) {
radeon_fence_unref(&rdev->vm_manager.active[i]);
}
radeon_asic_vm_fini(rdev);
mutex_unlock(&rdev->vm_manager.lock);
 
radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
rdev->vm_manager.enabled = false;
}
 
/**
* radeon_vm_evict - evict page table to make room for new one
*
* @rdev: radeon_device pointer
* @vm: VM we want to allocate something for
*
* Evict a VM from the lru, making sure that it isn't @vm (cayman+).
* Returns 0 for success, -ENOMEM for failure.
*
* Global and local mutex must be locked!
*/
static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_vm *vm_evict;
 
if (list_empty(&rdev->vm_manager.lru_vm))
return -ENOMEM;
 
vm_evict = list_first_entry(&rdev->vm_manager.lru_vm,
struct radeon_vm, list);
if (vm_evict == vm)
return -ENOMEM;
 
mutex_lock(&vm_evict->mutex);
radeon_vm_free_pt(rdev, vm_evict);
mutex_unlock(&vm_evict->mutex);
return 0;
}
 
/**
* radeon_vm_alloc_pt - allocates a page table for a VM
*
* @rdev: radeon_device pointer
* @vm: vm to bind
*
* Allocate a page table for the requested vm (cayman+).
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
{
unsigned pd_size, pts_size;
u64 *pd_addr;
int r;
 
if (vm == NULL) {
return -EINVAL;
}
 
if (vm->page_directory != NULL) {
return 0;
}
 
retry:
pd_size = RADEON_GPU_PAGE_ALIGN(radeon_vm_directory_size(rdev));
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_directory, pd_size,
RADEON_GPU_PAGE_SIZE, false);
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
if (r)
return r;
goto retry;
 
} else if (r) {
return r;
}
 
vm->pd_gpu_addr = radeon_sa_bo_gpu_addr(vm->page_directory);
 
/* Initially clear the page directory */
pd_addr = radeon_sa_bo_cpu_addr(vm->page_directory);
memset(pd_addr, 0, pd_size);
 
pts_size = radeon_vm_num_pdes(rdev) * sizeof(struct radeon_sa_bo *);
vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
 
if (vm->page_tables == NULL) {
DRM_ERROR("Cannot allocate memory for page table array\n");
radeon_sa_bo_free(rdev, &vm->page_directory, vm->fence);
return -ENOMEM;
}
 
return 0;
}
 
/**
* radeon_vm_add_to_lru - add the VM's page table to the LRU list
*
* @rdev: radeon_device pointer
* @vm: vm to add to LRU
*
* Add the allocated page table to the LRU list (cayman+).
*
* Global mutex must be locked!
*/
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm)
{
list_del_init(&vm->list);
list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
}
 
/**
* radeon_vm_grab_id - allocate the next free VMID
*
* @rdev: radeon_device pointer
* @vm: vm to allocate id for
* @ring: ring we want to submit job to
*
* Allocate an id for the vm (cayman+).
* Returns the fence we need to sync to (if any).
*
* Global and local mutex must be locked!
*/
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
struct radeon_vm *vm, int ring)
{
struct radeon_fence *best[RADEON_NUM_RINGS] = {};
unsigned choices[2] = {};
unsigned i;
 
/* check if the id is still valid */
if (vm->fence && vm->fence == rdev->vm_manager.active[vm->id])
return NULL;
 
/* we definitely need to flush */
radeon_fence_unref(&vm->last_flush);
 
/* skip over VMID 0, since it is the system VM */
for (i = 1; i < rdev->vm_manager.nvm; ++i) {
struct radeon_fence *fence = rdev->vm_manager.active[i];
 
if (fence == NULL) {
/* found a free one */
vm->id = i;
return NULL;
}
 
if (radeon_fence_is_earlier(fence, best[fence->ring])) {
best[fence->ring] = fence;
choices[fence->ring == ring ? 0 : 1] = i;
}
}
 
for (i = 0; i < 2; ++i) {
if (choices[i]) {
vm->id = choices[i];
return rdev->vm_manager.active[choices[i]];
}
}
 
/* should never happen */
BUG();
return NULL;
}
 
/**
* radeon_vm_fence - remember fence for vm
*
* @rdev: radeon_device pointer
* @vm: vm we want to fence
* @fence: fence to remember
*
* Fence the vm (cayman+).
* Set the fence used to protect page table and id.
*
* Global and local mutex must be locked!
*/
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence)
{
radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
 
radeon_fence_unref(&vm->fence);
vm->fence = radeon_fence_ref(fence);
}
 
/**
* radeon_vm_bo_find - find the bo_va for a specific vm & bo
*
* @vm: requested vm
* @bo: requested buffer object
*
* Find @bo inside the requested vm (cayman+).
* Search inside the @bo's vm list for the requested vm
* Returns the found bo_va or NULL if none is found
*
* Object has to be reserved!
*/
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
list_for_each_entry(bo_va, &bo->va, bo_list) {
if (bo_va->vm == vm) {
return bo_va;
}
}
return NULL;
}
 
/**
* radeon_vm_bo_add - add a bo to a specific vm
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
*
* Add @bo into the requested vm (cayman+).
* Add @bo to the list of bos associated with the vm
* Returns newly added bo_va or NULL for failure
*
* Object has to be reserved!
*/
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
if (bo_va == NULL) {
return NULL;
}
bo_va->vm = vm;
bo_va->bo = bo;
bo_va->soffset = 0;
bo_va->eoffset = 0;
bo_va->flags = 0;
bo_va->valid = false;
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->bo_list);
INIT_LIST_HEAD(&bo_va->vm_list);
 
mutex_lock(&vm->mutex);
list_add(&bo_va->vm_list, &vm->va);
list_add_tail(&bo_va->bo_list, &bo->va);
mutex_unlock(&vm->mutex);
 
return bo_va;
}
 
/**
* radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
*
* @rdev: radeon_device pointer
* @bo_va: bo_va to store the address
* @soffset: requested offset of the buffer in the VM address space
* @flags: attributes of pages (read/write/valid/etc.)
*
* Set offset of @bo_va (cayman+).
* Validate and set the offset requested within the vm address space.
* Returns 0 for success, error for failure.
*
* Object has to be reserved!
*/
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
struct radeon_bo_va *bo_va,
uint64_t soffset,
uint32_t flags)
{
uint64_t size = radeon_bo_size(bo_va->bo);
uint64_t eoffset, last_offset = 0;
struct radeon_vm *vm = bo_va->vm;
struct radeon_bo_va *tmp;
struct list_head *head;
unsigned last_pfn;
 
if (soffset) {
/* make sure object fits at this offset */
eoffset = soffset + size;
if (soffset >= eoffset) {
return -EINVAL;
}
 
last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
if (last_pfn > rdev->vm_manager.max_pfn) {
dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
last_pfn, rdev->vm_manager.max_pfn);
return -EINVAL;
}
 
} else {
eoffset = last_pfn = 0;
}
 
mutex_lock(&vm->mutex);
head = &vm->va;
last_offset = 0;
list_for_each_entry(tmp, &vm->va, vm_list) {
if (bo_va == tmp) {
/* skip over currently modified bo */
continue;
}
 
if (soffset >= last_offset && eoffset <= tmp->soffset) {
/* bo can be added before this one */
break;
}
if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
/* bo and tmp overlap, invalid offset */
dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
mutex_unlock(&vm->mutex);
return -EINVAL;
}
last_offset = tmp->eoffset;
head = &tmp->vm_list;
}
 
bo_va->soffset = soffset;
bo_va->eoffset = eoffset;
bo_va->flags = flags;
bo_va->valid = false;
list_move(&bo_va->vm_list, head);
 
mutex_unlock(&vm->mutex);
return 0;
}
 
/**
* radeon_vm_map_gart - get the physical address of a gart page
*
* @rdev: radeon_device pointer
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
* to (cayman+).
* Returns the physical address of the page.
*/
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
uint64_t result;
 
/* page table offset */
result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
 
/* in case cpu page size != gpu page size */
result |= addr & (~PAGE_MASK);
 
return result;
}
 
/**
* radeon_vm_update_pdes - make sure that page directory is valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
*
* Allocates new page tables if necessary
* and updates the page directory (cayman+).
* Returns 0 for success, error for failure.
*
* Global and local mutex must be locked!
*/
static int radeon_vm_update_pdes(struct radeon_device *rdev,
struct radeon_vm *vm,
uint64_t start, uint64_t end)
{
static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;
 
uint64_t last_pde = ~0, last_pt = ~0;
unsigned count = 0;
uint64_t pt_idx;
int r;
 
start = (start / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
end = (end / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
 
/* walk over the address space and update the page directory */
for (pt_idx = start; pt_idx <= end; ++pt_idx) {
uint64_t pde, pt;
 
if (vm->page_tables[pt_idx])
continue;
 
retry:
r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
&vm->page_tables[pt_idx],
RADEON_VM_PTE_COUNT * 8,
RADEON_GPU_PAGE_SIZE, false);
 
if (r == -ENOMEM) {
r = radeon_vm_evict(rdev, vm);
if (r)
return r;
goto retry;
} else if (r) {
return r;
}
 
pde = vm->pd_gpu_addr + pt_idx * 8;
 
pt = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
 
if (((last_pde + 8 * count) != pde) ||
((last_pt + incr * count) != pt)) {
 
if (count) {
radeon_asic_vm_set_page(rdev, last_pde,
last_pt, count, incr,
RADEON_VM_PAGE_VALID);
}
 
count = 1;
last_pde = pde;
last_pt = pt;
} else {
++count;
}
}
 
if (count) {
radeon_asic_vm_set_page(rdev, last_pde, last_pt, count,
incr, RADEON_VM_PAGE_VALID);
 
}
 
return 0;
}
 
/**
* radeon_vm_update_ptes - make sure that page tables are valid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to
* @flags: mapping flags
*
* Update the page tables in the range @start - @end (cayman+).
*
* Global and local mutex must be locked!
*/
static void radeon_vm_update_ptes(struct radeon_device *rdev,
struct radeon_vm *vm,
uint64_t start, uint64_t end,
uint64_t dst, uint32_t flags)
{
static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;
 
uint64_t last_pte = ~0, last_dst = ~0;
unsigned count = 0;
uint64_t addr;
 
start = start / RADEON_GPU_PAGE_SIZE;
end = end / RADEON_GPU_PAGE_SIZE;
 
/* walk over the address space and update the page tables */
for (addr = start; addr < end; ) {
uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
unsigned nptes;
uint64_t pte;
 
if ((addr & ~mask) == (end & ~mask))
nptes = end - addr;
else
nptes = RADEON_VM_PTE_COUNT - (addr & mask);
 
pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
pte += (addr & mask) * 8;
 
if ((last_pte + 8 * count) != pte) {
 
if (count) {
radeon_asic_vm_set_page(rdev, last_pte,
last_dst, count,
RADEON_GPU_PAGE_SIZE,
flags);
}
 
count = nptes;
last_pte = pte;
last_dst = dst;
} else {
count += nptes;
}
 
addr += nptes;
dst += nptes * RADEON_GPU_PAGE_SIZE;
}
 
if (count) {
radeon_asic_vm_set_page(rdev, last_pte, last_dst, count,
RADEON_GPU_PAGE_SIZE, flags);
}
}
 
/**
* radeon_vm_bo_update_pte - map a bo into the vm page table
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
* @mem: ttm mem
*
* Fill in the page table entries for @bo (cayman+).
* Returns 0 for success, -EINVAL for failure.
*
* Object has to be reserved, and the global and local mutex must be locked!
*/
int radeon_vm_bo_update_pte(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_bo *bo,
struct ttm_mem_reg *mem)
{
unsigned ridx = rdev->asic->vm.pt_ring_index;
struct radeon_ring *ring = &rdev->ring[ridx];
struct radeon_semaphore *sem = NULL;
struct radeon_bo_va *bo_va;
unsigned nptes, npdes, ndw;
uint64_t addr;
int r;
 
/* nothing to do if vm isn't bound */
if (vm->page_directory == NULL)
return 0;
 
bo_va = radeon_vm_bo_find(vm, bo);
if (bo_va == NULL) {
dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
return -EINVAL;
}
 
if (!bo_va->soffset) {
dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
bo, vm);
return -EINVAL;
}
 
if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
return 0;
 
bo_va->flags &= ~RADEON_VM_PAGE_VALID;
bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
if (mem) {
addr = mem->start << PAGE_SHIFT;
if (mem->mem_type != TTM_PL_SYSTEM) {
bo_va->flags |= RADEON_VM_PAGE_VALID;
bo_va->valid = true;
}
if (mem->mem_type == TTM_PL_TT) {
bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
} else {
addr += rdev->vm_manager.vram_base_offset;
}
} else {
addr = 0;
bo_va->valid = false;
}
 
if (vm->fence && radeon_fence_signaled(vm->fence)) {
radeon_fence_unref(&vm->fence);
}
 
if (vm->fence && vm->fence->ring != ridx) {
r = radeon_semaphore_create(rdev, &sem);
if (r) {
return r;
}
}
 
nptes = radeon_bo_ngpu_pages(bo);
 
/* assume two extra pdes in case the mapping crosses the page table borders */
npdes = (nptes >> RADEON_VM_BLOCK_SIZE) + 2;
 
/* estimate number of dw needed */
/* semaphore, fence and padding */
ndw = 32;
 
if (RADEON_VM_BLOCK_SIZE > 11)
/* reserve space for one header for every 2k dwords */
ndw += (nptes >> 11) * 4;
else
/* reserve space for one header for
every (1 << BLOCK_SIZE) entries */
ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
 
/* reserve space for pte addresses */
ndw += nptes * 2;
 
/* reserve space for one header for every 2k dwords */
ndw += (npdes >> 11) * 4;
 
/* reserve space for pde addresses */
ndw += npdes * 2;
 
r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
return r;
}
 
if (sem && radeon_fence_need_sync(vm->fence, ridx)) {
radeon_semaphore_sync_rings(rdev, sem, vm->fence->ring, ridx);
radeon_fence_note_sync(vm->fence, ridx);
}
 
r = radeon_vm_update_pdes(rdev, vm, bo_va->soffset, bo_va->eoffset);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return r;
}
 
radeon_vm_update_ptes(rdev, vm, bo_va->soffset, bo_va->eoffset,
addr, bo_va->flags);
 
radeon_fence_unref(&vm->fence);
r = radeon_fence_emit(rdev, &vm->fence, ridx);
if (r) {
radeon_ring_unlock_undo(rdev, ring);
return r;
}
radeon_ring_unlock_commit(rdev, ring);
radeon_semaphore_free(rdev, &sem, vm->fence);
radeon_fence_unref(&vm->last_flush);
 
return 0;
}
 
/**
* radeon_vm_bo_rmv - remove a bo from a specific vm
*
* @rdev: radeon_device pointer
* @bo_va: requested bo_va
*
* Remove @bo_va->bo from the requested vm (cayman+).
* Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
* remove the ptes for @bo_va in the page table.
* Returns 0 for success.
*
* Object has to be reserved!
*/
int radeon_vm_bo_rmv(struct radeon_device *rdev,
struct radeon_bo_va *bo_va)
{
int r;
 
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&bo_va->vm->mutex);
r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
mutex_unlock(&rdev->vm_manager.lock);
list_del(&bo_va->vm_list);
mutex_unlock(&bo_va->vm->mutex);
list_del(&bo_va->bo_list);
 
kfree(bo_va);
return r;
}
 
/**
* radeon_vm_bo_invalidate - mark the bo as invalid
*
* @rdev: radeon_device pointer
* @vm: requested vm
* @bo: radeon buffer object
*
* Mark @bo as invalid (cayman+).
*/
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
struct radeon_bo *bo)
{
struct radeon_bo_va *bo_va;
 
BUG_ON(!atomic_read(&bo->tbo.reserved));
list_for_each_entry(bo_va, &bo->va, bo_list) {
bo_va->valid = false;
}
}
 
/**
* radeon_vm_init - initialize a vm instance
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Init @vm fields (cayman+).
*/
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
vm->id = 0;
vm->fence = NULL;
mutex_init(&vm->mutex);
INIT_LIST_HEAD(&vm->list);
INIT_LIST_HEAD(&vm->va);
}
 
/**
* radeon_vm_fini - tear down a vm instance
*
* @rdev: radeon_device pointer
* @vm: requested vm
*
* Tear down @vm (cayman+).
* Unbind the VM and remove all bos from the vm bo list
*/
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
struct radeon_bo_va *bo_va, *tmp;
int r;
 
mutex_lock(&rdev->vm_manager.lock);
mutex_lock(&vm->mutex);
radeon_vm_free_pt(rdev, vm);
mutex_unlock(&rdev->vm_manager.lock);
 
if (!list_empty(&vm->va)) {
dev_err(rdev->dev, "still active bo inside vm\n");
}
list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
list_del_init(&bo_va->vm_list);
r = radeon_bo_reserve(bo_va->bo, false);
if (!r) {
list_del_init(&bo_va->bo_list);
radeon_bo_unreserve(bo_va->bo);
kfree(bo_va);
}
}
radeon_fence_unref(&vm->fence);
radeon_fence_unref(&vm->last_flush);
mutex_unlock(&vm->mutex);
}
/drivers/video/drm/radeon/radeon_gem.c
25,14 → 25,14
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "drm.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
 
int radeon_gem_object_init(struct drm_gem_object *obj)
{
/* we do nothing here */
BUG();
 
return 0;
}
 
51,6 → 51,7
struct drm_gem_object **obj)
{
struct radeon_bo *robj;
unsigned long max_size;
int r;
 
*obj = NULL;
58,11 → 59,26
if (alignment < PAGE_SIZE) {
alignment = PAGE_SIZE;
}
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, &robj);
 
/* maximum bo size is the minimum of visible vram and gtt size */
max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
if (size > max_size) {
printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
__func__, __LINE__, size >> 20, max_size >> 20);
return -ENOMEM;
}
 
retry:
r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
if (r) {
if (r != -ERESTARTSYS)
if (r != -ERESTARTSYS) {
if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
initial_domain |= RADEON_GEM_DOMAIN_GTT;
goto retry;
}
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
}
return r;
}
*obj = &robj->gem_base;
116,7 → 132,7
}
if (!domain) {
/* Do nothing */
printk(KERN_WARNING "Set domain withou domain !\n");
printk(KERN_WARNING "Set domain without domain !\n");
return 0;
}
if (domain == RADEON_GEM_DOMAIN_CPU) {
151,6 → 167,7
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
unsigned i;
 
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
 
159,8 → 176,9
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
RADEON_IB_POOL_SIZE*64*1024;
args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
for(i = 0; i < RADEON_NUM_RINGS; ++i)
args->gart_size -= rdev->ring[i].ring_size;
return 0;
}
 
189,6 → 207,7
uint32_t handle;
int r;
 
down_read(&rdev->exclusive_lock);
/* create a gem object to contain this object in */
args->size = roundup(args->size, PAGE_SIZE);
r = radeon_gem_object_create(rdev, args->size, args->alignment,
195,6 → 214,8
args->initial_domain, false,
false, &gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
r = drm_gem_handle_create(filp, gobj, &handle);
201,9 → 222,12
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
args->handle = handle;
up_read(&rdev->exclusive_lock);
return 0;
}
 
212,6 → 236,7
{
/* transition the BO to a domain -
* just validate the BO into a certain domain */
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_set_domain *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
219,10 → 244,12
 
/* for now if someone requests domain CPU -
* just make sure the buffer is finished with */
down_read(&rdev->exclusive_lock);
 
/* just do a BO wait for now */
gobj = drm_gem_object_lookup(dev, filp, args->handle);
if (gobj == NULL) {
up_read(&rdev->exclusive_lock);
return -ENOENT;
}
robj = gem_to_radeon_bo(gobj);
230,6 → 257,8
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
 
drm_gem_object_unreference_unlocked(gobj);
up_read(&rdev->exclusive_lock);
r = radeon_gem_handle_lockup(robj->rdev, r);
return r;
}
 
261,6 → 290,7
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_busy *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
286,6 → 316,7
break;
}
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
 
292,6 → 323,7
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_wait_idle *args = data;
struct drm_gem_object *gobj;
struct radeon_bo *robj;
304,9 → 336,10
robj = gem_to_radeon_bo(gobj);
r = radeon_bo_wait(robj, NULL, false);
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
if (rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(rdev, robj);
drm_gem_object_unreference_unlocked(gobj);
r = radeon_gem_handle_lockup(rdev, r);
return r;
}
 
/drivers/video/drm/radeon/radeon_i2c.c
23,16 → 23,23
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "radeon_drm.h"
#include <linux/export.h>
 
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
 
extern int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num);
extern u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap);
 
/**
* radeon_ddc_probe - check if a sink answers on the DDC bus of a connector
*
*/
bool radeon_ddc_probe(struct radeon_connector *radeon_connector, bool requires_extended_probe)
bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
{
u8 out = 0x0;
u8 buf[8];
39,23 → 46,19
int ret;
struct i2c_msg msgs[] = {
{
.addr = 0x50,
.addr = DDC_ADDR,
.flags = 0,
.len = 1,
.buf = &out,
},
{
.addr = 0x50,
.addr = DDC_ADDR,
.flags = I2C_M_RD,
.len = 1,
.len = 8,
.buf = buf,
}
};
 
/* Read 8 bytes from i2c for extended probe of EDID header */
if (requires_extended_probe)
msgs[1].len = 8;
 
/* on hw with routers, select right port */
if (radeon_connector->router.ddc_valid)
radeon_router_select_ddc_port(radeon_connector);
64,7 → 67,6
if (ret != 2)
/* Couldn't find an accessible DDC on this connector */
return false;
if (requires_extended_probe) {
/* Probe also for valid EDID header
* EDID header starts with:
* 0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x00.
75,14 → 77,14
* connector */
return false;
}
}
return true;
}
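
The header check folded into the unconditional path above compares the first eight bytes read from the sink against the fixed EDID signature. A minimal sketch of that comparison, assuming kernel types; the DRM core's drm_edid_header_is_valid() performs essentially this test with a per-byte score:

	/* sketch of the EDID signature test; buf holds the 8 bytes read over DDC */
	static const u8 edid_header[8] = {
		0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00
	};

	static bool edid_header_ok(const u8 *buf)
	{
		return memcmp(buf, edid_header, sizeof(edid_header)) == 0;
	}
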
 
/* bit banging i2c */
 
static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
static int pre_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
137,19 → 139,30
WREG32(rec->en_data_reg, temp);
 
/* mask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg) | rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
if (lock_state)
temp |= rec->mask_clk_mask;
else
temp &= ~rec->mask_clk_mask;
 
temp = RREG32(rec->mask_data_reg) | rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
 
return 0;
}
 
static void post_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
uint32_t temp;
 
/* unmask the gpio pins for software use */
temp = RREG32(rec->mask_clk_reg) & ~rec->mask_clk_mask;
WREG32(rec->mask_clk_reg, temp);
temp = RREG32(rec->mask_clk_reg);
 
temp = RREG32(rec->mask_data_reg);
if (lock_state)
temp |= rec->mask_data_mask;
else
temp &= ~rec->mask_data_mask;
temp = RREG32(rec->mask_data_reg) & ~rec->mask_data_mask;
WREG32(rec->mask_data_reg, temp);
temp = RREG32(rec->mask_data_reg);
}
209,22 → 222,6
WREG32(rec->en_data_reg, val);
}
 
static int pre_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
 
radeon_i2c_do_lock(i2c, 1);
 
return 0;
}
 
static void post_xfer(struct i2c_adapter *i2c_adap)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
 
radeon_i2c_do_lock(i2c, 0);
}
 
/* hw i2c */
 
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
890,6 → 887,11
.functionality = radeon_hw_i2c_func,
};
 
static const struct i2c_algorithm radeon_atom_i2c_algo = {
.master_xfer = radeon_atom_hw_i2c_xfer,
.functionality = radeon_atom_hw_i2c_func,
};
 
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
898,12 → 900,17
struct radeon_i2c_chan *i2c;
int ret;
 
/* don't add the mm_i2c bus unless hw_i2c is enabled */
if (rec->mm_i2c && (radeon_hw_i2c == 0))
return NULL;
 
i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
if (i2c == NULL)
return NULL;
 
i2c->rec = *rec;
// i2c->adapter.owner = THIS_MODULE;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
if (rec->mm_i2c ||
920,6 → 927,18
// DRM_ERROR("Failed to register hw i2c %s\n", name);
// goto out_free;
// }
} else if (rec->hw_capable &&
radeon_hw_i2c &&
ASIC_IS_DCE3(rdev)) {
/* hw i2c using atom */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon i2c hw bus %s", name);
i2c->adapter.algo = &radeon_atom_i2c_algo;
// ret = i2c_add_adapter(&i2c->adapter);
// if (ret) {
// DRM_ERROR("Failed to register hw i2c %s\n", name);
// goto out_free;
// }
} else {
/* set the radeon bit adapter */
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
931,10 → 950,8
i2c->algo.bit.setscl = set_clock;
i2c->algo.bit.getsda = get_data;
i2c->algo.bit.getscl = get_clock;
i2c->algo.bit.udelay = 20;
/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
* make this, 2 jiffies is a lot more reliable */
i2c->algo.bit.timeout = 2;
i2c->algo.bit.udelay = 10;
i2c->algo.bit.timeout = usecs_to_jiffies(2200); /* from VESA */
i2c->algo.bit.data = i2c;
ret = i2c_bit_add_bus(&i2c->adapter);
if (ret) {
962,7 → 979,7
return NULL;
 
i2c->rec = *rec;
// i2c->adapter.owner = THIS_MODULE;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
i2c->dev = dev;
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
/drivers/video/drm/radeon/radeon_irq_kms.c
25,55 → 25,93
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
 
#define RADEON_WAIT_IDLE_TIMEOUT 200
 
struct radeon_device *main_device;
 
extern int irq_override;
 
 
/**
* irq_handler_kms - irq handler for KMS
*
* This is the irq handler for the radeon KMS driver (all asics).
* radeon_irq_process is a macro that points to the per-asic
* irq handler callback.
*/
void irq_handler_kms()
{
// dbgprintf("%s\n",__FUNCTION__);
radeon_irq_process(main_device);
}
 
 
static void radeon_irq_preinstall(struct radeon_device *rdev)
/**
* radeon_irq_preinstall_kms - drm irq preinstall callback
*
* @rdev: radeon device pointer
*
* Gets the hw ready to enable irqs (all asics).
* This function disables all interrupt sources on the GPU.
*/
void radeon_irq_preinstall_kms(struct radeon_device *rdev)
{
unsigned long irqflags;
unsigned i;
 
spin_lock_irqsave(&rdev->irq.lock, irqflags);
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
rdev->irq.gui_idle = false;
for (i = 0; i < rdev->num_crtc; i++)
for (i = 0; i < RADEON_NUM_RINGS; i++)
atomic_set(&rdev->irq.ring_int[i], 0);
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
for (i = 0; i < RADEON_MAX_CRTCS; i++) {
rdev->irq.crtc_vblank_int[i] = false;
for (i = 0; i < 6; i++) {
rdev->irq.hpd[i] = false;
rdev->irq.pflip[i] = false;
atomic_set(&rdev->irq.pflip[i], 0);
rdev->irq.afmt[i] = false;
}
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
/* Clear bits */
radeon_irq_process(rdev);
}
 
int radeon_driver_irq_postinstall(struct radeon_device *rdev)
/**
* radeon_driver_irq_postinstall_kms - drm irq postinstall callback
*
* @rdev: radeon device pointer
*
* Handles stuff to be done after enabling irqs (all asics).
* Returns 0 on success.
*/
 
int radeon_driver_irq_postinstall_kms(struct radeon_device *rdev)
{
// struct radeon_device *rdev = dev->dev_private;
 
// dev->max_vblank_count = 0x001fffff;
rdev->irq.sw_int = true;
 
radeon_irq_set(rdev);
return 0;
}
 
/**
* radeon_irq_kms_init - init driver interrupt info
*
* @rdev: radeon device pointer
*
* Sets up the work irq handlers, vblank init, MSIs, etc. (all asics).
* Returns 0 for success, error for failure.
*/
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int i;
int irq_line;
int r = 0;
 
80,20 → 118,27
ENTER();
 
// INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
// INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi);
 
spin_lock_init(&rdev->irq.sw_lock);
for (i = 0; i < rdev->num_crtc; i++)
spin_lock_init(&rdev->irq.pflip_lock[i]);
spin_lock_init(&rdev->irq.lock);
// r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
// if (r) {
// return r;
// }
/* enable msi */
rdev->msi_enabled = 0;
 
rdev->msi_enabled = 0;
// if (radeon_msi_ok(rdev)) {
// int ret = pci_enable_msi(rdev->pdev);
// if (!ret) {
// rdev->msi_enabled = 1;
// dev_info(rdev->dev, "radeon: using MSI.\n");
// }
// }
rdev->irq.installed = true;
main_device = rdev;
 
radeon_irq_preinstall(rdev);
radeon_irq_preinstall_kms(rdev);
 
if (irq_override)
irq_line = irq_override;
106,41 → 151,81
 
// r = drm_irq_install(rdev->ddev);
 
r = radeon_driver_irq_postinstall(rdev);
r = radeon_driver_irq_postinstall_kms(rdev);
if (r) {
rdev->irq.installed = false;
LEAVE();
return r;
}
 
DRM_INFO("radeon: irq initialized.\n");
return 0;
}
 
/**
* radeon_irq_kms_fini - tear down driver interrupt info
*
* @rdev: radeon device pointer
*
* Tears down the work irq handlers, vblank handlers, MSIs, etc. (all asics).
*/
void radeon_irq_kms_fini(struct radeon_device *rdev)
{
// drm_vblank_cleanup(rdev->ddev);
if (rdev->irq.installed) {
// drm_irq_uninstall(rdev->ddev);
rdev->irq.installed = false;
// if (rdev->msi_enabled)
// pci_disable_msi(rdev->pdev);
}
// flush_work(&rdev->hotplug_work);
}
 
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
/**
* radeon_irq_kms_sw_irq_get - enable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to enable
*
* Enables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
 
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
rdev->irq.sw_int = true;
if (!rdev->ddev->irq_enabled)
return;
 
if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
 
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
/**
* radeon_irq_kms_sw_irq_put - disable software interrupt
*
* @rdev: radeon device pointer
* @ring: ring whose interrupt you want to disable
*
* Disables the software interrupt for a specific ring (all asics).
* The software interrupt is generally used to signal a fence on
* a particular ring.
*/
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
 
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
rdev->irq.sw_int = false;
if (!rdev->ddev->irq_enabled)
return;
 
if (atomic_dec_and_test(&rdev->irq.ring_int[ring])) {
spin_lock_irqsave(&rdev->irq.lock, irqflags);
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
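
A hedged sketch of how these two calls pair up around a fence wait; wait_on_ring_fence() is a hypothetical helper and the radeon_fence_wait() call is illustrative:

	/* hypothetical helper: keep the ring irq enabled only while waiting */
	static int wait_on_ring_fence(struct radeon_device *rdev, int ring,
				      struct radeon_fence *fence)
	{
		int r;

		radeon_irq_kms_sw_irq_get(rdev, ring);	/* first user enables the irq */
		r = radeon_fence_wait(fence, false);	/* fence signals via the sw irq */
		radeon_irq_kms_sw_irq_put(rdev, ring);	/* last user disables it again */
		return r;
	}
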
 
 
/drivers/video/drm/radeon/radeon_legacy_crtc.c
206,11 → 206,6
WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
}
 
void radeon_restore_common_regs(struct drm_device *dev)
{
/* don't need this yet */
}
 
static void radeon_pll_wait_for_read_update_complete(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
295,7 → 290,7
return 1;
}
 
void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
419,6 → 414,7
r = radeon_bo_reserve(rbo, false);
if (unlikely(r != 0))
return r;
/* Only 27 bit offset for legacy CRTC */
r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
if (unlikely(r != 0)) {
radeon_bo_unreserve(rbo);
437,7 → 433,7
 
crtc_offset_cntl = 0;
 
pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
((target_fb->bits_per_pixel * 8) - 1)) /
(target_fb->bits_per_pixel * 8));
988,15 → 984,9
}
 
static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
 
/* adjust pm to upcoming mode change */
radeon_pm_compute_clocks(rdev);
 
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
return false;
return true;
1029,9 → 1019,11
 
static void radeon_crtc_prepare(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
 
radeon_crtc->in_mode_set = true;
/*
* The hardware wedges sometimes if you reconfigure one CRTC
* whilst another is running (see fdo bug #24611).
1042,6 → 1034,7
 
static void radeon_crtc_commit(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct drm_crtc *crtci;
 
1052,6 → 1045,7
if (crtci->enabled)
radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
}
radeon_crtc->in_mode_set = false;
}
 
static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
/drivers/video/drm/radeon/radeon_legacy_encoders.c
23,11 → 23,15
* Authors: Dave Airlie
* Alex Deucher
*/
#include "drmP.h"
#include "drm_crtc_helper.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "atom.h"
#include <linux/backlight.h>
#ifdef CONFIG_PMAC_BACKLIGHT
#include <asm/backlight.h>
#endif
 
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder)
{
84,7 → 88,7
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
udelay(1000);
mdelay(1);
 
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
97,7 → 101,7
(backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
if (is_mac)
lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
udelay(panel_pwr_delay * 1000);
mdelay(panel_pwr_delay);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
break;
case DRM_MODE_DPMS_STANDBY:
114,10 → 118,10
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
}
udelay(panel_pwr_delay * 1000);
mdelay(panel_pwr_delay);
WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
udelay(panel_pwr_delay * 1000);
mdelay(panel_pwr_delay);
break;
}
 
240,7 → 244,7
}
 
static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
265,15 → 269,49
.disable = radeon_legacy_encoder_disable,
};
 
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
u8
radeon_legacy_get_backlight_level(struct radeon_encoder *radeon_encoder)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
u8 backlight_level;
 
#define MAX_RADEON_LEVEL 0xFF
backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
 
struct radeon_backlight_privdata {
struct radeon_encoder *encoder;
uint8_t negative;
};
return backlight_level;
}
 
void
radeon_legacy_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
{
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
int dpms_mode = DRM_MODE_DPMS_ON;
 
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
if (lvds->backlight_level > 0)
dpms_mode = lvds->dpms_mode;
else
dpms_mode = DRM_MODE_DPMS_OFF;
lvds->backlight_level = level;
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
if (lvds->backlight_level > 0)
dpms_mode = lvds->dpms_mode;
else
dpms_mode = DRM_MODE_DPMS_OFF;
lvds->backlight_level = level;
}
}
 
radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
}
 
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
static uint8_t radeon_legacy_lvds_level(struct backlight_device *bd)
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
282,13 → 320,13
/* Convert brightness to hardware level */
if (bd->props.brightness < 0)
level = 0;
else if (bd->props.brightness > MAX_RADEON_LEVEL)
level = MAX_RADEON_LEVEL;
else if (bd->props.brightness > RADEON_MAX_BL_LEVEL)
level = RADEON_MAX_BL_LEVEL;
else
level = bd->props.brightness;
 
if (pdata->negative)
level = MAX_RADEON_LEVEL - level;
level = RADEON_MAX_BL_LEVEL - level;
 
return level;
}
297,27 → 335,10
{
struct radeon_backlight_privdata *pdata = bl_get_data(bd);
struct radeon_encoder *radeon_encoder = pdata->encoder;
struct drm_device *dev = radeon_encoder->base.dev;
struct radeon_device *rdev = dev->dev_private;
int dpms_mode = DRM_MODE_DPMS_ON;
 
if (radeon_encoder->enc_priv) {
if (rdev->is_atom_bios) {
struct radeon_encoder_atom_dig *lvds = radeon_encoder->enc_priv;
dpms_mode = lvds->dpms_mode;
lvds->backlight_level = radeon_legacy_lvds_level(bd);
} else {
struct radeon_encoder_lvds *lvds = radeon_encoder->enc_priv;
dpms_mode = lvds->dpms_mode;
lvds->backlight_level = radeon_legacy_lvds_level(bd);
}
}
radeon_legacy_set_backlight_level(radeon_encoder,
radeon_legacy_lvds_level(bd));
 
if (bd->props.brightness > 0)
radeon_legacy_lvds_update(&radeon_encoder->base, dpms_mode);
else
radeon_legacy_lvds_update(&radeon_encoder->base, DRM_MODE_DPMS_OFF);
 
return 0;
}
 
332,7 → 353,7
backlight_level = (RREG32(RADEON_LVDS_GEN_CNTL) >>
RADEON_LVDS_BL_MOD_LEVEL_SHIFT) & 0xff;
 
return pdata->negative ? MAX_RADEON_LEVEL - backlight_level : backlight_level;
return pdata->negative ? RADEON_MAX_BL_LEVEL - backlight_level : backlight_level;
}
 
static const struct backlight_ops radeon_backlight_ops = {
349,6 → 370,7
struct backlight_properties props;
struct radeon_backlight_privdata *pdata;
uint8_t backlight_level;
char bl_name[16];
 
if (!radeon_encoder->enc_priv)
return;
365,9 → 387,12
goto error;
}
 
props.max_brightness = MAX_RADEON_LEVEL;
memset(&props, 0, sizeof(props));
props.max_brightness = RADEON_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
snprintf(bl_name, sizeof(bl_name),
"radeon_bl%d", dev->primary->index);
bd = backlight_device_register(bl_name, &drm_connector->kdev,
pdata, &radeon_backlight_ops, &props);
if (IS_ERR(bd)) {
DRM_ERROR("Backlight registration failed\n");
444,7 → 469,7
}
 
if (bd) {
struct radeon_legacy_backlight_privdata *pdata;
struct radeon_backlight_privdata *pdata;
 
pdata = bl_get_data(bd);
backlight_device_unregister(bd);
652,7 → 677,7
 
WREG32(RADEON_DAC_MACRO_CNTL, tmp);
 
udelay(2000);
mdelay(2);
 
if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
found = connector_status_connected;
969,11 → 994,7
static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
if (tmds) {
if (tmds->i2c_bus)
radeon_i2c_destroy(tmds->i2c_bus);
}
/* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */
kfree(radeon_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(radeon_encoder);
1495,7 → 1516,7
tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
WREG32(RADEON_DAC_CNTL2, tmp);
 
udelay(10000);
mdelay(10);
 
if (ASIC_IS_R300(rdev)) {
if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
/drivers/video/drm/radeon/radeon_legacy_tv.c
1,5 → 1,5
#include "drmP.h"
#include "drm_crtc_helper.h"
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "radeon.h"
 
/*
864,7 → 864,7
*v_sync_strt_wid = tmp;
}
 
static inline int get_post_div(int value)
static int get_post_div(int value)
{
int post_div;
switch (value) {
/drivers/video/drm/radeon/radeon_mode.h
30,12 → 30,11
#ifndef RADEON_MODE_H
#define RADEON_MODE_H
 
#include <drm_crtc.h>
#include <drm_mode.h>
#include <drm_edid.h>
#include <drm_dp_helper.h>
#include <drm_fixed.h>
#include <drm_crtc_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_fixed.h>
#include <drm/drm_crtc_helper.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
 
210,6 → 209,7
CT_RN50_POWER,
CT_MAC_X800,
CT_MAC_G5_9600,
CT_SAM440EP
};
 
enum radeon_dvo_chip {
219,6 → 219,13
 
struct radeon_fbdev;
 
struct radeon_afmt {
bool enabled;
int offset;
bool last_buffer_filled_status;
int id;
};
 
struct radeon_mode_info {
struct atom_context *atom_context;
struct card_info *atom_card_info;
225,6 → 232,7
enum radeon_connector_table connector_table;
bool mode_config_initialized;
struct radeon_crtc *crtcs[6];
struct radeon_afmt *afmt[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
243,8 → 251,23
 
/* pointer to fbdev info structure */
struct radeon_fbdev *rfbdev;
/* firmware flags */
u16 firmware_flags;
/* pointer to backlight encoder */
struct radeon_encoder *bl_encoder;
};
 
#define RADEON_MAX_BL_LEVEL 0xFF
 
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
 
struct radeon_backlight_privdata {
struct radeon_encoder *encoder;
uint8_t negative;
};
 
#endif
 
#define MAX_H_CODE_TIMING_LEN 32
#define MAX_V_CODE_TIMING_LEN 32
 
260,6 → 283,18
uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN];
};
 
struct radeon_atom_ss {
uint16_t percentage;
uint8_t type;
uint16_t step;
uint8_t delay;
uint8_t range;
uint8_t refdiv;
/* asic_ss */
uint16_t rate;
uint16_t amount;
};
 
struct radeon_crtc {
struct drm_crtc base;
int crtc_id;
266,6 → 301,7
u16 lut_r[256], lut_g[256], lut_b[256];
bool enabled;
bool can_tile;
bool in_mode_set;
uint32_t crtc_offset;
struct drm_gem_object *cursor_bo;
uint64_t cursor_addr;
281,6 → 317,16
struct drm_display_mode native_mode;
int pll_id;
int deferred_flip_completion;
/* pll sharing */
struct radeon_atom_ss ss;
bool ss_enabled;
u32 adjusted_clock;
int bpc;
u32 pll_reference_div;
u32 pll_post_div;
u32 pll_flags;
struct drm_encoder *encoder;
struct drm_connector *connector;
};
 
struct radeon_encoder_primary_dac {
334,18 → 380,6
};
 
/* spread spectrum */
struct radeon_atom_ss {
uint16_t percentage;
uint8_t type;
uint16_t step;
uint8_t delay;
uint8_t range;
uint8_t refdiv;
/* asic_ss */
uint16_t rate;
uint16_t amount;
};
 
struct radeon_encoder_atom_dig {
bool linkb;
/* atom dig */
360,6 → 394,8
struct backlight_device *bl_dev;
int dpms_mode;
uint8_t backlight_level;
int panel_mode;
struct radeon_afmt *afmt;
};
 
struct radeon_encoder_atom_dac {
381,10 → 417,6
struct drm_display_mode native_mode;
void *enc_priv;
int audio_polling_active;
int hdmi_offset;
int hdmi_config_offset;
int hdmi_audio_workaround;
int hdmi_buffer_status;
bool is_ext_encoder;
u16 caps;
};
436,9 → 468,6
struct radeon_i2c_chan *ddc_bus;
/* some systems have an hdmi and vga port with a shared ddc line */
bool shared_ddc;
/* for some Radeon chip families we apply an additional EDID header
check as part of the DDC probe */
bool requires_extended_probe;
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
445,6 → 474,7
struct edid *edid;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
uint16_t connector_object_id;
struct radeon_hpd hpd;
struct radeon_router router;
456,6 → 486,8
struct drm_gem_object *obj;
};
 
#define ENCODER_MODE_IS_DP(em) (((em) == ATOM_ENCODER_MODE_DP) || \
((em) == ATOM_ENCODER_MODE_DP_MST))
 
extern enum radeon_tv_std
radeon_combios_get_tv_info(struct radeon_device *rdev);
464,28 → 496,37
 
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
extern struct drm_connector *
radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
u32 pixel_clock);
 
extern bool radeon_encoder_is_dp_bridge(struct drm_encoder *encoder);
extern bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
extern bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector);
extern bool radeon_connector_is_dp12_capable(struct drm_connector *connector);
extern int radeon_get_monitor_bpc(struct drm_connector *connector);
 
extern void radeon_connector_hotplug(struct drm_connector *connector);
extern int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct drm_display_mode *mode);
extern void radeon_dp_set_link_config(struct drm_connector *connector,
struct drm_display_mode *mode);
const struct drm_display_mode *mode);
extern void radeon_dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
struct drm_connector *connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
extern void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
extern struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder);
extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
u8 write_byte, u8 *read_byte);
 
515,8 → 556,7
u8 val);
extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector,
bool requires_extended_probe);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
extern struct drm_encoder *radeon_best_encoder(struct drm_connector *connector);
639,9 → 679,9
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
void radeon_framebuffer_init(struct drm_device *dev,
int radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
struct drm_mode_fb_cmd *mode_cmd,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
 
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
661,7 → 701,7
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
void radeon_combios_asic_init(struct drm_device *dev);
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
/drivers/video/drm/radeon/radeon_object.h
31,6 → 31,8
#include <drm/radeon_drm.h>
#include "radeon.h"
 
struct sg_table;
 
/**
* radeon_mem_type_to_domain - return domain corresponding to mem_type
* @mem_type: ttm memory type
52,16 → 54,7
return 0;
}
 
/**
* radeon_bo_reserve - reserve bo
* @bo: bo structure
* @no_wait: don't sleep while trying to reserve (return -EBUSY)
*
* Returns:
* -EBUSY: buffer is busy and @no_wait is true
* -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
* a signal. Release all buffer reservations and return to user-space.
*/
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr);
 
static inline void radeon_bo_unreserve(struct radeon_bo *bo)
{
92,6 → 85,16
return !!atomic_read(&bo->tbo.reserved);
}
 
static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
{
return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}
 
static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
{
return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}
 
/**
* radeon_bo_mmap_offset - return mmap offset of bo
* @bo: radeon object for which we query the offset
106,32 → 109,20
return bo->tbo.addr_space_offset;
}
 
static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
bool no_wait)
{
int r;
extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
bool no_wait);
 
r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
if (unlikely(r != 0))
return r;
// spin_lock(&bo->tbo.bdev->fence_lock);
if (mem_type)
*mem_type = bo->tbo.mem.mem_type;
if (bo->tbo.sync_obj)
r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
// spin_unlock(&bo->tbo.bdev->fence_lock);
ttm_bo_unreserve(&bo->tbo);
return r;
}
 
extern int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align,
bool kernel, u32 domain,
struct sg_table *sg,
struct radeon_bo **bo_ptr);
extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
extern void radeon_bo_kunmap(struct radeon_bo *bo);
extern void radeon_bo_unref(struct radeon_bo **bo);
extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain,
u64 max_offset, u64 *gpu_addr);
extern int radeon_bo_unpin(struct radeon_bo *bo);
extern int radeon_bo_evict_vram(struct radeon_device *rdev);
extern void radeon_bo_force_delete(struct radeon_device *rdev);
152,4 → 143,41
struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 
/*
* sub allocation
*/
 
static inline uint64_t radeon_sa_bo_gpu_addr(struct radeon_sa_bo *sa_bo)
{
return sa_bo->manager->gpu_addr + sa_bo->soffset;
}
 
static inline void * radeon_sa_bo_cpu_addr(struct radeon_sa_bo *sa_bo)
{
return sa_bo->manager->cpu_ptr + sa_bo->soffset;
}
 
extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain);
extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager);
extern int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
unsigned size, unsigned align, bool block);
extern void radeon_sa_bo_free(struct radeon_device *rdev,
struct radeon_sa_bo **sa_bo,
struct radeon_fence *fence);
#if defined(CONFIG_DEBUG_FS)
extern void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
struct seq_file *m);
#endif
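
A short sketch of a suballocation round trip using only the interfaces declared above; the helper name and sizes are illustrative and error handling is trimmed:

	/* illustrative only: carve a block, touch it from the CPU, recycle on fence */
	static int sa_example(struct radeon_device *rdev,
			      struct radeon_sa_manager *mgr,
			      struct radeon_fence *fence)
	{
		struct radeon_sa_bo *sa_bo;
		int r;

		/* 4KB block, 256-byte aligned, block until space frees up */
		r = radeon_sa_bo_new(rdev, mgr, &sa_bo, 4096, 256, true);
		if (r)
			return r;

		memset(radeon_sa_bo_cpu_addr(sa_bo), 0, 4096);	/* CPU view */
		/* radeon_sa_bo_gpu_addr(sa_bo) is the address the GPU would use */

		/* block is reclaimed once @fence signals (a NULL fence frees at once) */
		radeon_sa_bo_free(rdev, &sa_bo, fence);
		return 0;
	}
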
 
 
#endif
/drivers/video/drm/radeon/radeon_object_kos.c
122,9 → 122,11
bo->reserved.counter = 1;
}
 
struct sg_table;
 
int radeon_bo_create(struct radeon_device *rdev,
unsigned long size, int byte_align, bool kernel, u32 domain,
struct radeon_bo **bo_ptr)
struct sg_table *sg, struct radeon_bo **bo_ptr)
{
struct radeon_bo *bo;
enum ttm_bo_type type;
218,7 → 220,7
pagelist = &((u32_t*)page_tabs)[(u32_t)bo->kptr >> 12];
dbgprintf("pagelist %x\n", pagelist);
radeon_gart_bind(bo->rdev, bo->tbo.offset,
bo->tbo.vm_node->size, pagelist);
bo->tbo.vm_node->size, pagelist, NULL);
bo->tbo.offset += (u64)bo->rdev->mc.gtt_start;
}
else
/drivers/video/drm/radeon/radeon_pm.c
20,20 → 20,18
* Authors: Rafał Miłecki <zajec5@gmail.com>
* Alex Deucher <alexdeucher@gmail.com>
*/
#include "drmP.h"
#include <drm/drmP.h>
#include "radeon.h"
#include "avivod.h"
#include "atom.h"
 
#define DRM_DEBUG_DRIVER(fmt, args...)
 
#define RADEON_IDLE_LOOP_MS 100
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
#define RADEON_WAIT_IDLE_TIMEOUT 200
 
static const char *radeon_pm_state_type_name[5] = {
"Default",
"",
"Powersave",
"Battery",
"Balanced",
47,24 → 45,26
static void radeon_pm_update_profile(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
 
static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
int radeon_pm_get_type_index(struct radeon_device *rdev,
enum radeon_pm_state_type ps_type,
int instance)
{
int i;
int found_instance = -1;
 
#define ACPI_AC_CLASS "ac_adapter"
for (i = 0; i < rdev->pm.num_power_states; i++) {
if (rdev->pm.power_state[i].type == ps_type) {
found_instance++;
if (found_instance == instance)
return i;
}
}
/* return default if no match */
return rdev->pm.default_power_state_index;
}
 
#ifdef CONFIG_ACPI
static int radeon_acpi_event(struct notifier_block *nb,
unsigned long val,
void *data)
void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
{
struct radeon_device *rdev = container_of(nb, struct radeon_device, acpi_nb);
struct acpi_bus_event *entry = (struct acpi_bus_event *)data;
 
if (strcmp(entry->device_class, ACPI_AC_CLASS) == 0) {
if (power_supply_is_system_supplied() > 0)
DRM_DEBUG_DRIVER("pm: AC\n");
else
DRM_DEBUG_DRIVER("pm: DC\n");
 
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (rdev->pm.profile == PM_PROFILE_AUTO) {
mutex_lock(&rdev->pm.mutex);
75,10 → 75,6
}
}
 
return NOTIFY_OK;
}
#endif
 
static void radeon_pm_update_profile(struct radeon_device *rdev)
{
switch (rdev->pm.profile) {
140,6 → 136,15
 
}
 
static void radeon_sync_with_vblank(struct radeon_device *rdev)
{
if (rdev->pm.active_crtcs) {
rdev->pm.vblank_sync = false;
// wait_event_timeout(
// rdev->irq.vblank_queue, rdev->pm.vblank_sync,
// msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
}
}
 
static void radeon_set_power_state(struct radeon_device *rdev)
{
156,8 → 161,21
if (sclk > rdev->pm.default_sclk)
sclk = rdev->pm.default_sclk;
 
/* starting with BTC, there is one state that is used for both
* MH and SH. Difference is that we always use the high clock index for
* mclk.
*/
if ((rdev->pm.pm_method == PM_METHOD_PROFILE) &&
(rdev->family >= CHIP_BARTS) &&
rdev->pm.active_crtc_count &&
((rdev->pm.profile_index == PM_PROFILE_MID_MH_IDX) ||
(rdev->pm.profile_index == PM_PROFILE_LOW_MH_IDX)))
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx].mclk;
else
mclk = rdev->pm.power_state[rdev->pm.requested_power_state_index].
clock_info[rdev->pm.requested_clock_mode_index].mclk;
 
if (mclk > rdev->pm.default_mclk)
mclk = rdev->pm.default_mclk;
 
165,7 → 183,7
if (sclk < rdev->pm.current_sclk)
misc_after = true;
 
// radeon_sync_with_vblank(rdev);
radeon_sync_with_vblank(rdev);
 
if (rdev->pm.pm_method == PM_METHOD_DYNPM) {
if (!radeon_pm_in_vbl(rdev))
188,7 → 206,7
}
 
/* set memory clock */
if (rdev->asic->set_memory_clock && (mclk != rdev->pm.current_mclk)) {
if (rdev->asic->pm.set_memory_clock && (mclk != rdev->pm.current_mclk)) {
radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_memory_clock(rdev, mclk);
radeon_pm_debug_check_in_vbl(rdev, true);
218,27 → 236,16
return;
 
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
mutex_lock(&rdev->cp.mutex);
// down_write(&rdev->pm.mclk_lock);
mutex_lock(&rdev->ring_lock);
 
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
if (rdev->irq.installed) {
/* wait for GPU idle */
rdev->pm.gui_idle = false;
rdev->irq.gui_idle = true;
/* wait for the rings to drain */
for (i = 0; i < RADEON_NUM_RINGS; i++) {
struct radeon_ring *ring = &rdev->ring[i];
if (ring->ready)
radeon_fence_wait_empty_locked(rdev, i);
}
} else {
if (rdev->cp.ready) {
// struct radeon_fence *fence;
// radeon_ring_alloc(rdev, 64);
// radeon_fence_create(rdev, &fence);
// radeon_fence_emit(rdev, fence);
// radeon_ring_commit(rdev);
// radeon_fence_wait(fence, false);
// radeon_fence_unref(&fence);
}
}
 
radeon_unmap_vram_bos(rdev);
 
if (rdev->irq.installed) {
268,8 → 275,8
 
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
 
mutex_unlock(&rdev->cp.mutex);
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ring_lock);
// up_write(&rdev->pm.mclk_lock);
mutex_unlock(&rdev->ddev->struct_mutex);
}
 
294,17 → 301,15
for (j = 0; j < power_state->num_clock_modes; j++) {
clock_info = &(power_state->clock_info[j]);
if (rdev->flags & RADEON_IS_IGP)
DRM_DEBUG_DRIVER("\t\t%d e: %d%s\n",
DRM_DEBUG_DRIVER("\t\t%d e: %d\n",
j,
clock_info->sclk * 10,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
clock_info->sclk * 10);
else
DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d%s\n",
DRM_DEBUG_DRIVER("\t\t%d e: %d\tm: %d\tv: %d\n",
j,
clock_info->sclk * 10,
clock_info->mclk * 10,
clock_info->voltage.voltage,
clock_info->flags & RADEON_PM_MODE_NO_DISPLAY ? "\tNo display only" : "");
clock_info->voltage.voltage);
}
}
}
313,8 → 318,15
struct device_attribute *attr,
char *buf)
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
int cp = rdev->pm.profile;
 
return snprintf(buf, PAGE_SIZE, "%s\n", "default");
return snprintf(buf, PAGE_SIZE, "%s\n",
(cp == PM_PROFILE_AUTO) ? "auto" :
(cp == PM_PROFILE_LOW) ? "low" :
(cp == PM_PROFILE_MID) ? "mid" :
(cp == PM_PROFILE_HIGH) ? "high" : "default");
}
 
static ssize_t radeon_set_pm_profile(struct device *dev,
326,11 → 338,26
struct radeon_device *rdev = ddev->dev_private;
 
mutex_lock(&rdev->pm.mutex);
 
if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
if (strncmp("default", buf, strlen("default")) == 0)
rdev->pm.profile = PM_PROFILE_DEFAULT;
 
else if (strncmp("auto", buf, strlen("auto")) == 0)
rdev->pm.profile = PM_PROFILE_AUTO;
else if (strncmp("low", buf, strlen("low")) == 0)
rdev->pm.profile = PM_PROFILE_LOW;
else if (strncmp("mid", buf, strlen("mid")) == 0)
rdev->pm.profile = PM_PROFILE_MID;
else if (strncmp("high", buf, strlen("high")) == 0)
rdev->pm.profile = PM_PROFILE_HIGH;
else {
count = -EINVAL;
goto fail;
}
radeon_pm_update_profile(rdev);
radeon_pm_set_clocks(rdev);
} else
count = -EINVAL;
 
fail:
mutex_unlock(&rdev->pm.mutex);
 
373,7 → 400,7
mutex_unlock(&rdev->pm.mutex);
// cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
} else {
DRM_ERROR("invalid power method!\n");
count = -EINVAL;
goto fail;
}
radeon_pm_compute_clocks(rdev);
381,6 → 408,9
return count;
}
 
//static DEVICE_ATTR(power_profile, S_IRUGO | S_IWUSR, radeon_get_pm_profile, radeon_set_pm_profile);
//static DEVICE_ATTR(power_method, S_IRUGO | S_IWUSR, radeon_get_pm_method, radeon_set_pm_method);
 
static ssize_t radeon_hwmon_show_temp(struct device *dev,
struct device_attribute *attr,
char *buf)
387,7 → 417,7
{
struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));
struct radeon_device *rdev = ddev->dev_private;
u32 temp;
int temp;
 
switch (rdev->pm.int_thermal_type) {
case THERMAL_TYPE_RV6XX:
400,6 → 430,12
case THERMAL_TYPE_NI:
temp = evergreen_get_temp(rdev);
break;
case THERMAL_TYPE_SUMO:
temp = sumo_get_temp(rdev);
break;
case THERMAL_TYPE_SI:
temp = si_get_temp(rdev);
break;
default:
temp = 0;
break;
/drivers/video/drm/radeon/radeon_reg.h
56,6 → 56,7
#include "r600_reg.h"
#include "evergreen_reg.h"
#include "ni_reg.h"
#include "si_reg.h"
 
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
539,9 → 540,11
 
#define RADEON_CRTC2_PITCH 0x032c
#define RADEON_CRTC_STATUS 0x005c
# define RADEON_CRTC_VBLANK_CUR (1 << 0)
# define RADEON_CRTC_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC2_STATUS 0x03fc
# define RADEON_CRTC2_VBLANK_CUR (1 << 0)
# define RADEON_CRTC2_VBLANK_SAVE (1 << 1)
# define RADEON_CRTC2_VBLANK_SAVE_CLEAR (1 << 1)
#define RADEON_CRTC_V_SYNC_STRT_WID 0x020c
/drivers/video/drm/radeon/radeon_ring.c
24,226 → 24,365
* Authors: Dave Airlie
* Alex Deucher
* Jerome Glisse
* Christian Konig
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"
 
int radeon_debugfs_ib_init(struct radeon_device *rdev);
 
/*
* IB.
* IB
* IBs (Indirect Buffers) are areas of GPU accessible memory where
* commands are stored. You can put a pointer to the IB in the
* command ring and the hw will fetch the commands from the IB
* and execute them. Generally userspace acceleration drivers
* produce command buffers which are sent to the kernel and
* put in IBs for execution by the requested ring.
*/
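
A hedged sketch of that life cycle against the interfaces in this file; fill_commands() is a hypothetical placeholder for packet emission:

	/* illustrative submission path: allocate, fill, schedule, release */
	static int submit_example(struct radeon_device *rdev, int ring)
	{
		struct radeon_ib ib;
		int r;

		r = radeon_ib_get(rdev, ring, &ib, NULL, 256);	/* 256 bytes, no VM */
		if (r)
			return r;

		fill_commands(ib.ptr, &ib.length_dw);	/* hypothetical packet writer */

		r = radeon_ib_schedule(rdev, &ib, NULL);	/* no CONST_IB */
		radeon_ib_free(rdev, &ib);	/* safe: the emitted fence guards the memory */
		return r;
	}
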
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
static int radeon_debugfs_sa_init(struct radeon_device *rdev);
 
/**
* radeon_ib_get - request an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ring: ring index the IB is associated with
* @ib: IB object returned
* @size: requested IB size
*
* Request an IB (all asics). IBs are allocated using the
* suballocator.
* Returns 0 on success, error on failure.
*/
int radeon_ib_get(struct radeon_device *rdev, int ring,
struct radeon_ib *ib, struct radeon_vm *vm,
unsigned size)
{
struct radeon_fence *fence;
struct radeon_ib *nib;
int r = 0, i, c;
int i, r;
 
*ib = NULL;
r = radeon_fence_create(rdev, &fence);
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &ib->sa_bo, size, 256, true);
if (r) {
dev_err(rdev->dev, "failed to create fence for new IB\n");
dev_err(rdev->dev, "failed to get a new IB (%d)\n", r);
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
i &= (RADEON_IB_POOL_SIZE - 1);
if (rdev->ib_pool.ibs[i].free) {
nib = &rdev->ib_pool.ibs[i];
break;
}
}
if (nib == NULL) {
/* This should never happen, it means we allocated all
* IB and haven't scheduled one yet, return EBUSY to
* userspace hoping that on ioctl recall we get better
* luck
*/
dev_err(rdev->dev, "no free indirect buffer !\n");
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
return -EBUSY;
}
rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
nib->free = false;
if (nib->fence) {
mutex_unlock(&rdev->ib_pool.mutex);
r = radeon_fence_wait(nib->fence, false);
 
r = radeon_semaphore_create(rdev, &ib->semaphore);
if (r) {
dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
mutex_lock(&rdev->ib_pool.mutex);
nib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
return r;
}
mutex_lock(&rdev->ib_pool.mutex);
 
ib->ring = ring;
ib->fence = NULL;
ib->ptr = radeon_sa_bo_cpu_addr(ib->sa_bo);
ib->vm = vm;
if (vm) {
/* ib pool is bound at RADEON_VA_IB_OFFSET in virtual address
* space and soffset is the offset inside the pool bo
*/
ib->gpu_addr = ib->sa_bo->soffset + RADEON_VA_IB_OFFSET;
} else {
ib->gpu_addr = radeon_sa_bo_gpu_addr(ib->sa_bo);
}
radeon_fence_unref(&nib->fence);
nib->fence = fence;
nib->length_dw = 0;
mutex_unlock(&rdev->ib_pool.mutex);
*ib = nib;
ib->is_const_ib = false;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
ib->sync_to[i] = NULL;
 
return 0;
}
 
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
/**
* radeon_ib_free - free an IB (Indirect Buffer)
*
* @rdev: radeon_device pointer
* @ib: IB object to free
*
* Free an IB (all asics).
*/
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ib *tmp = *ib;
 
*ib = NULL;
if (tmp == NULL) {
return;
radeon_semaphore_free(rdev, &ib->semaphore, ib->fence);
radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
radeon_fence_unref(&ib->fence);
}
if (!tmp->fence->emited)
radeon_fence_unref(&tmp->fence);
mutex_lock(&rdev->ib_pool.mutex);
tmp->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
}
 
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
/**
* radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
*
* @rdev: radeon_device pointer
* @ib: IB object to schedule
* @const_ib: Const IB to schedule (SI only)
*
* Schedule an IB on the associated ring (all asics).
* Returns 0 on success, error on failure.
*
* On SI, there are two parallel engines fed from the primary ring,
* the CE (Constant Engine) and the DE (Drawing Engine). Since
* resource descriptors have moved to memory, the CE allows you to
* prime the caches while the DE is updating register state so that
* the resource descriptors will be already in cache when the draw is
* processed. To accomplish this, the userspace driver submits two
* IBs, one for the CE and one for the DE. If there is a CE IB (called
* a CONST_IB), it will be put on the ring prior to the DE IB. Prior
* to SI there was just a DE IB.
*/
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
struct radeon_ib *const_ib)
{
int r = 0;
struct radeon_ring *ring = &rdev->ring[ib->ring];
bool need_sync = false;
int i, r = 0;
 
if (!ib->length_dw || !rdev->cp.ready) {
if (!ib->length_dw || !ring->ready) {
/* TODO: Nothing in the ib we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
dev_err(rdev->dev, "couldn't schedule ib\n");
return -EINVAL;
}
 
/* 64 dwords should be enough for fence too */
r = radeon_ring_lock(rdev, 64);
r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
if (r) {
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
}
radeon_ring_ib_execute(rdev, ib);
radeon_fence_emit(rdev, ib->fence);
mutex_lock(&rdev->ib_pool.mutex);
/* once scheduled IB is considered free and protected by the fence */
ib->free = true;
mutex_unlock(&rdev->ib_pool.mutex);
radeon_ring_unlock_commit(rdev);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_fence *fence = ib->sync_to[i];
if (radeon_fence_need_sync(fence, ib->ring)) {
need_sync = true;
radeon_semaphore_sync_rings(rdev, ib->semaphore,
fence->ring, ib->ring);
radeon_fence_note_sync(fence, ib->ring);
}
}
/* immediately free semaphore when we don't need to sync */
if (!need_sync) {
radeon_semaphore_free(rdev, &ib->semaphore, NULL);
}
/* if we can't remember our last VM flush then flush now! */
if (ib->vm && !ib->vm->last_flush) {
radeon_ring_vm_flush(rdev, ib->ring, ib->vm);
}
if (const_ib) {
radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
radeon_semaphore_free(rdev, &const_ib->semaphore, NULL);
}
radeon_ring_ib_execute(rdev, ib->ring, ib);
r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
if (r) {
dev_err(rdev->dev, "failed to emit fence for new IB (%d)\n", r);
radeon_ring_unlock_undo(rdev, ring);
return r;
}
if (const_ib) {
const_ib->fence = radeon_fence_ref(ib->fence);
}
/* we just flushed the VM, remember that */
if (ib->vm && !ib->vm->last_flush) {
ib->vm->last_flush = radeon_fence_ref(ib->fence);
}
radeon_ring_unlock_commit(rdev, ring);
return 0;
}
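
For the SI case described in the comment above, the CE IB simply rides along as the extra argument; a hedged fragment wrapped as a helper:

	/* sketch: radeon_ib_schedule() puts @const_ib on the ring before @de_ib */
	static int submit_with_ce(struct radeon_device *rdev,
				  struct radeon_ib *de_ib,
				  struct radeon_ib *const_ib)
	{
		return radeon_ib_schedule(rdev, de_ib, const_ib);
	}
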
 
/**
* radeon_ib_pool_init - Init the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Initialize the suballocator to manage a pool of memory
* for use as IBs (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ib_pool_init(struct radeon_device *rdev)
{
void *ptr;
uint64_t gpu_addr;
int i;
int r = 0;
int r;
 
if (rdev->ib_pool.robj)
if (rdev->ib_pool_ready) {
return 0;
INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
&rdev->ib_pool.robj);
if (r) {
DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
return r;
}
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
r = radeon_sa_bo_manager_init(rdev, &rdev->ring_tmp_bo,
RADEON_IB_POOL_SIZE*64*1024,
RADEON_GEM_DOMAIN_GTT);
if (r) {
radeon_bo_unreserve(rdev->ib_pool.robj);
DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
return r;
}
r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
radeon_bo_unreserve(rdev->ib_pool.robj);
 
r = radeon_sa_bo_manager_start(rdev, &rdev->ring_tmp_bo);
if (r) {
DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
return r;
}
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
unsigned offset;
 
offset = i * 64 * 1024;
rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
rdev->ib_pool.ibs[i].ptr = ptr + offset;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
rdev->ib_pool.ibs[i].free = true;
rdev->ib_pool_ready = true;
if (radeon_debugfs_sa_init(rdev)) {
dev_err(rdev->dev, "failed to register debugfs file for SA\n");
}
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
if (radeon_debugfs_ib_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for IB !\n");
return 0;
}
return r;
}
 
/**
* radeon_ib_pool_fini - Free the IB (Indirect Buffer) pool
*
* @rdev: radeon_device pointer
*
* Tear down the suballocator managing the pool of memory
* for use as IBs (all asics).
*/
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
if (rdev->ib_pool_ready) {
radeon_sa_bo_manager_suspend(rdev, &rdev->ring_tmp_bo);
radeon_sa_bo_manager_fini(rdev, &rdev->ring_tmp_bo);
rdev->ib_pool_ready = false;
}
}
 
/**
* radeon_ib_ring_tests - test IBs on the rings
*
* @rdev: radeon_device pointer
*
* Test an IB (Indirect Buffer) on each ring.
* If the test fails, disable the ring.
* Returns 0 on success, error if the primary GFX ring
* IB test fails.
*/
int radeon_ib_ring_tests(struct radeon_device *rdev)
{
unsigned i;
int r;
struct radeon_bo *robj;
 
if (!rdev->ib_pool.ready) {
return;
}
mutex_lock(&rdev->ib_pool.mutex);
// radeon_ib_bogus_cleanup(rdev);
robj = rdev->ib_pool.robj;
rdev->ib_pool.robj = NULL;
mutex_unlock(&rdev->ib_pool.mutex);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_ring *ring = &rdev->ring[i];
 
if (robj) {
r = radeon_bo_reserve(robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(robj);
radeon_bo_unpin(robj);
radeon_bo_unreserve(robj);
if (!ring->ready)
continue;
 
r = radeon_ib_test(rdev, i, ring);
if (r) {
ring->ready = false;
 
if (i == RADEON_RING_TYPE_GFX_INDEX) {
/* oh, oh, that's really bad */
DRM_ERROR("radeon: failed testing IB on GFX ring (%d).\n", r);
rdev->accel_working = false;
return r;
 
} else {
/* still not good, but we can live with it */
DRM_ERROR("radeon: failed testing IB on ring %d (%d).\n", i, r);
}
radeon_bo_unref(&robj);
}
}
return 0;
}
 
 
/*
* Ring.
* Rings
* Most engines on the GPU are fed via ring buffers. Ring
* buffers are areas of GPU accessible memory that the host
* writes commands into and the GPU reads commands out of.
* There is a rptr (read pointer) that determines where the
* GPU is currently reading, and a wptr (write pointer)
* which determines where the host has written. When the
* pointers are equal, the ring is idle. When the host
* writes commands to the ring buffer, it increments the
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
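 
The free-space bookkeeping that falls out of this layout is plain power-of-two
mask arithmetic; a standalone sketch, with illustrative values:
 
#include <assert.h>
 
/* ring_dw must be a power of two, as in the driver */
static unsigned ring_free_dw(unsigned rptr, unsigned wptr, unsigned ring_dw)
{
	unsigned free = (rptr + ring_dw - wptr) & (ring_dw - 1);
	return free ? free : ring_dw;	/* rptr == wptr: idle ring, fully free */
}
 
int main(void)
{
	assert(ring_free_dw(40, 100, 1024) == 964);	/* 60 dwords still pending */
	assert(ring_free_dw(0, 0, 1024) == 1024);	/* nothing queued */
	return 0;
}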
void radeon_ring_free_size(struct radeon_device *rdev)
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring);
 
/**
* radeon_ring_write - write a value to the ring
*
* @ring: radeon_ring structure holding ring information
* @v: dword (dw) value to write
*
* Write a value to the requested ring buffer (all asics).
*/
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
}
#endif
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
 
/**
* radeon_ring_supports_scratch_reg - check if the ring supports
* writing to scratch registers
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Check if a specific ring supports writing to scratch registers (all asics).
* Returns true if the ring supports writing to scratch regs, false if not.
*/
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev,
struct radeon_ring *ring)
{
switch (ring->idx) {
case RADEON_RING_TYPE_GFX_INDEX:
case CAYMAN_RING_TYPE_CP1_INDEX:
case CAYMAN_RING_TYPE_CP2_INDEX:
return true;
default:
return false;
}
}
 
/**
* radeon_ring_free_size - update the free size
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Update the free dw slots in the ring buffer (all asics).
*/
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rptr;
 
if (rdev->wb.enabled)
rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
else {
if (rdev->family >= CHIP_R600)
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
else
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
}
rptr = RREG32(ring->rptr_reg);
ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
/* This works because ring_size is a power of 2 */
rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
rdev->cp.ring_free_dw -= rdev->cp.wptr;
rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
if (!rdev->cp.ring_free_dw) {
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
ring->ring_free_dw -= ring->wptr;
ring->ring_free_dw &= ring->ptr_mask;
if (!ring->ring_free_dw) {
ring->ring_free_dw = ring->ring_size / 4;
}
}
 
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
/**
* radeon_ring_alloc - allocate space on the ring buffer
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ndw: number of dwords to allocate in the ring buffer
*
* Allocate @ndw dwords in the ring buffer (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
 
/* Align requested size with padding so unlock_commit can
* pad safely */
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
while (ndw > (rdev->cp.ring_free_dw - 1)) {
radeon_ring_free_size(rdev);
if (ndw < rdev->cp.ring_free_dw) {
ndw = (ndw + ring->align_mask) & ~ring->align_mask;
while (ndw > (ring->ring_free_dw - 1)) {
radeon_ring_free_size(rdev, ring);
if (ndw < ring->ring_free_dw) {
break;
}
// r = radeon_fence_wait_next(rdev);
252,98 → 391,362
// return r;
// }
}
rdev->cp.count_dw = ndw;
rdev->cp.wptr_old = rdev->cp.wptr;
ring->count_dw = ndw;
ring->wptr_old = ring->wptr;
return 0;
}
 
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
/**
* radeon_ring_lock - lock the ring and allocate space on it
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ndw: number of dwords to allocate in the ring buffer
*
* Lock the ring and allocate @ndw dwords in the ring buffer
* (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
 
mutex_lock(&rdev->cp.mutex);
r = radeon_ring_alloc(rdev, ndw);
mutex_lock(&rdev->ring_lock);
r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
mutex_unlock(&rdev->cp.mutex);
mutex_unlock(&rdev->ring_lock);
return r;
}
return 0;
}
 
void radeon_ring_commit(struct radeon_device *rdev)
/**
* radeon_ring_commit - tell the GPU to execute the new
* commands on the ring buffer
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Update the wptr (write pointer) to tell the GPU to
* execute new commands on the ring buffer (all asics).
*/
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned count_dw_pad;
unsigned i;
 
/* We pad to match fetch size */
count_dw_pad = (rdev->cp.align_mask + 1) -
(rdev->cp.wptr & rdev->cp.align_mask);
for (i = 0; i < count_dw_pad; i++) {
radeon_ring_write(rdev, 2 << 30);
while (ring->wptr & ring->align_mask) {
radeon_ring_write(ring, ring->nop);
}
DRM_MEMORYBARRIER();
radeon_cp_commit(rdev);
WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
(void)RREG32(ring->wptr_reg);
}
 
void radeon_ring_unlock_commit(struct radeon_device *rdev)
/**
* radeon_ring_unlock_commit - tell the GPU to execute the new
* commands on the ring buffer and unlock it
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Call radeon_ring_commit() then unlock the ring (all asics).
*/
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_commit(rdev);
mutex_unlock(&rdev->cp.mutex);
radeon_ring_commit(rdev, ring);
mutex_unlock(&rdev->ring_lock);
}
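 
Taken together, lock/write/commit form the basic emission pattern; a hedged
sketch (the packet word is illustrative, not a real PM4 header):
 
static int example_ring_emit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r = radeon_ring_lock(rdev, ring, 2);	/* reserve 2 dwords */
	if (r)
		return r;
	radeon_ring_write(ring, 0xC0001000);	/* illustrative packet word */
	radeon_ring_write(ring, 0);
	radeon_ring_unlock_commit(rdev, ring);	/* bump wptr; GPU starts fetching */
	return 0;
}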
 
void radeon_ring_unlock_undo(struct radeon_device *rdev)
/**
* radeon_ring_undo - reset the wptr
*
* @ring: radeon_ring structure holding ring information
*
* Reset the driver's copy of the wptr (all asics).
*/
void radeon_ring_undo(struct radeon_ring *ring)
{
rdev->cp.wptr = rdev->cp.wptr_old;
mutex_unlock(&rdev->cp.mutex);
ring->wptr = ring->wptr_old;
}
 
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
/**
* radeon_ring_unlock_undo - reset the wptr and unlock the ring
*
* @ring: radeon_ring structure holding ring information
*
* Call radeon_ring_undo() then unlock the ring (all asics).
*/
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
radeon_ring_undo(ring);
mutex_unlock(&rdev->ring_lock);
}
 
/**
* radeon_ring_force_activity - add some nop packets to the ring
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Add some nop packets to the ring to force activity (all asics).
* Used for lockup detection to see if the rptr is advancing.
*/
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
 
rdev->cp.ring_size = ring_size;
radeon_ring_free_size(rdev, ring);
if (ring->rptr == ring->wptr) {
r = radeon_ring_alloc(rdev, ring, 1);
if (!r) {
radeon_ring_write(ring, ring->nop);
radeon_ring_commit(rdev, ring);
}
}
}
 
/**
* radeon_ring_lockup_update - update lockup variables
*
* @ring: radeon_ring structure holding ring information
*
* Update the last rptr value and timestamp (all asics).
*/
void radeon_ring_lockup_update(struct radeon_ring *ring)
{
ring->last_rptr = ring->rptr;
ring->last_activity = GetTimerTicks();
}
 
/**
* radeon_ring_test_lockup() - check if the ring is locked up by recording information
* @rdev: radeon device structure
* @ring: radeon_ring structure holding ring information
*
* We don't need to initialize the lockup tracking information, as we will either
* see the CP rptr move to a different value or see a jiffies wrap around, either
* of which forces initialization of the lockup tracking information.
*
* A possible false positive is if we get called after a while and last_cp_rptr ==
* the current CP rptr; even if it's unlikely, it might happen. To avoid this,
* if the elapsed time since the last call is bigger than 2 seconds we return
* false and update the tracking information. Because of this the caller must call
* radeon_ring_test_lockup several times in less than 2 seconds for a lockup to be
* reported; the fencing code should be cautious about that.
*
* The caller should write to the ring to force the CP to do something, so we
* don't get a false positive when the CP has simply been given nothing to do.
*
**/
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned long cjiffies, elapsed;
uint32_t rptr;
 
cjiffies = GetTimerTicks();
if (!time_after(cjiffies, ring->last_activity)) {
/* likely a wrap around */
radeon_ring_lockup_update(ring);
return false;
}
rptr = RREG32(ring->rptr_reg);
ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
if (ring->rptr != ring->last_rptr) {
/* CP is still working, no lockup */
radeon_ring_lockup_update(ring);
return false;
}
elapsed = jiffies_to_msecs(cjiffies - ring->last_activity);
if (radeon_lockup_timeout && elapsed >= radeon_lockup_timeout) {
dev_err(rdev->dev, "GPU lockup CP stall for more than %lumsec\n", elapsed);
return true;
}
/* give a chance to the GPU ... */
return false;
}
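 
A hedged sketch of the polling pattern the comment above calls for: nudge the
ring, then test. Locking and timeout policy are elided.
 
static bool example_ring_stalled(struct radeon_device *rdev,
				 struct radeon_ring *ring)
{
	radeon_ring_force_activity(rdev, ring);	/* give the CP something to do */
	return radeon_ring_test_lockup(rdev, ring);	/* true only after repeated stalls */
}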
 
/**
* radeon_ring_backup - Back up the content of a ring
*
* @rdev: radeon_device pointer
* @ring: the ring we want to back up
*
* Saves all unprocessed commits from a ring, returns the number of dwords saved.
*/
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring,
uint32_t **data)
{
unsigned size, ptr, i;
 
/* just in case lock the ring */
mutex_lock(&rdev->ring_lock);
*data = NULL;
 
if (ring->ring_obj == NULL) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
 
/* it doesn't make sense to save anything if all fences are signaled */
if (!radeon_fence_count_emitted(rdev, ring->idx)) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
 
/* calculate the number of dw on the ring */
if (ring->rptr_save_reg)
ptr = RREG32(ring->rptr_save_reg);
else if (rdev->wb.enabled)
ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
else {
/* no way to read back the next rptr */
mutex_unlock(&rdev->ring_lock);
return 0;
}
 
size = ring->wptr + (ring->ring_size / 4);
size -= ptr;
size &= ring->ptr_mask;
if (size == 0) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
 
/* and then save the content of the ring */
*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
if (!*data) {
mutex_unlock(&rdev->ring_lock);
return 0;
}
for (i = 0; i < size; ++i) {
(*data)[i] = ring->ring[ptr++];
ptr &= ring->ptr_mask;
}
 
mutex_unlock(&rdev->ring_lock);
return size;
}
 
/**
* radeon_ring_restore - append saved commands to the ring again
*
* @rdev: radeon_device pointer
* @ring: ring to append commands to
* @size: number of dwords we want to write
* @data: saved commands
*
* Allocates space on the ring and restore the previously saved commands.
*/
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned size, uint32_t *data)
{
int i, r;
 
if (!size || !data)
return 0;
 
/* restore the saved ring content */
r = radeon_ring_lock(rdev, ring, size);
if (r)
return r;
 
for (i = 0; i < size; ++i) {
radeon_ring_write(ring, data[i]);
}
 
radeon_ring_unlock_commit(rdev, ring);
kfree(data);
return 0;
}
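 
These two helpers are meant to bracket a GPU reset; a hedged sketch, assuming
radeon_asic_reset() as the ASIC-level reset entry point from the wider driver:
 
static int example_ring_recover(struct radeon_device *rdev,
				struct radeon_ring *ring)
{
	uint32_t *data;
	unsigned size;
	int r;
 
	size = radeon_ring_backup(rdev, ring, &data);	/* 0 if nothing pending */
	r = radeon_asic_reset(rdev);	/* assumed ASIC-level reset entry point */
	if (r) {
		kfree(data);
		return r;
	}
	return radeon_ring_restore(rdev, ring, size, data);	/* kfrees data */
}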
 
/**
* radeon_ring_init - init driver ring struct.
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
* @ring_size: size of the ring
* @rptr_offs: offset of the rptr writeback location in the WB buffer
* @rptr_reg: MMIO offset of the rptr register
* @wptr_reg: MMIO offset of the wptr register
* @ptr_reg_shift: bit offset of the rptr/wptr values
* @ptr_reg_mask: bit mask of the rptr/wptr values
* @nop: nop packet for this ring
*
* Initialize the driver information for the selected ring (all asics).
* Returns 0 on success, error on failure.
*/
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
int r;
 
ring->ring_size = ring_size;
ring->rptr_offs = rptr_offs;
ring->rptr_reg = rptr_reg;
ring->wptr_reg = wptr_reg;
ring->ptr_reg_shift = ptr_reg_shift;
ring->ptr_reg_mask = ptr_reg_mask;
ring->nop = nop;
/* Allocate ring buffer */
if (rdev->cp.ring_obj == NULL) {
r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
if (ring->ring_obj == NULL) {
r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
&rdev->cp.ring_obj);
NULL, &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
r = radeon_bo_reserve(rdev->cp.ring_obj, false);
r = radeon_bo_reserve(ring->ring_obj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
&rdev->cp.gpu_addr);
r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
&ring->gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->cp.ring_obj);
radeon_bo_unreserve(ring->ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
r = radeon_bo_kmap(rdev->cp.ring_obj,
(void **)&rdev->cp.ring);
radeon_bo_unreserve(rdev->cp.ring_obj);
r = radeon_bo_kmap(ring->ring_obj,
(void **)&ring->ring);
radeon_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
ring->ptr_mask = (ring->ring_size / 4) - 1;
ring->ring_free_dw = ring->ring_size / 4;
if (rdev->wb.enabled) {
u32 index = RADEON_WB_RING0_NEXT_RPTR + (ring->idx * 4);
ring->next_rptr_gpu_addr = rdev->wb.gpu_addr + index;
ring->next_rptr_cpu_addr = &rdev->wb.wb[index/4];
}
if (radeon_debugfs_ring_init(rdev, ring)) {
DRM_ERROR("Failed to register debugfs file for rings !\n");
}
radeon_ring_lockup_update(ring);
return 0;
}
 
void radeon_ring_fini(struct radeon_device *rdev)
/**
* radeon_ring_fini - tear down the driver ring struct.
*
* @rdev: radeon_device pointer
* @ring: radeon_ring structure holding ring information
*
* Tear down the driver information for the selected ring (all asics).
*/
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
struct radeon_bo *ring_obj;
 
mutex_lock(&rdev->cp.mutex);
ring_obj = rdev->cp.ring_obj;
rdev->cp.ring = NULL;
rdev->cp.ring_obj = NULL;
mutex_unlock(&rdev->cp.mutex);
mutex_lock(&rdev->ring_lock);
ring_obj = ring->ring_obj;
ring->ready = false;
ring->ring = NULL;
ring->ring_obj = NULL;
mutex_unlock(&rdev->ring_lock);
 
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
356,81 → 759,92
}
}
 
 
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
 
static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct radeon_ib *ib = node->info_ent->data;
unsigned i;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
int ridx = *(int*)node->info_ent->data;
struct radeon_ring *ring = &rdev->ring[ridx];
unsigned count, i, j;
 
if (ib == NULL) {
return 0;
radeon_ring_free_size(rdev, ring);
count = (ring->ring_size / 4) - ring->ring_free_dw;
seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
if (ring->rptr_save_reg) {
seq_printf(m, "rptr next(0x%04x): 0x%08x\n", ring->rptr_save_reg,
RREG32(ring->rptr_save_reg));
}
seq_printf(m, "IB %04u\n", ib->idx);
seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
i = ring->rptr;
for (j = 0; j <= count; j++) {
seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
i = (i + 1) & ring->ptr_mask;
}
return 0;
}
 
static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
 
static struct drm_info_list radeon_debugfs_ring_info_list[] = {
{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};
 
static int radeon_debugfs_sa_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct radeon_device *rdev = node->info_ent->data;
struct radeon_ib *ib;
unsigned i;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
 
mutex_lock(&rdev->ib_pool.mutex);
if (list_empty(&rdev->ib_pool.bogus_ib)) {
mutex_unlock(&rdev->ib_pool.mutex);
seq_printf(m, "no bogus IB recorded\n");
radeon_sa_bo_dump_debug_info(&rdev->ring_tmp_bo, m);
 
return 0;
 
}
ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
list_del_init(&ib->list);
mutex_unlock(&rdev->ib_pool.mutex);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
vfree(ib->ptr);
kfree(ib);
return 0;
}
 
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
static struct drm_info_list radeon_debugfs_sa_list[] = {
{"radeon_sa_info", &radeon_debugfs_sa_info, 0, NULL},
};
 
static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif
 
int radeon_debugfs_ib_init(struct radeon_device *rdev)
static int radeon_debugfs_ring_init(struct radeon_device *rdev, struct radeon_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
int r;
for (i = 0; i < ARRAY_SIZE(radeon_debugfs_ring_info_list); ++i) {
struct drm_info_list *info = &radeon_debugfs_ring_info_list[i];
int ridx = *(int*)radeon_debugfs_ring_info_list[i].data;
unsigned r;
 
radeon_debugfs_ib_bogus_info_list[0].data = rdev;
r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
if (&rdev->ring[ridx] != ring)
continue;
 
r = radeon_debugfs_add_files(rdev, info, 1);
if (r)
return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
radeon_debugfs_ib_list[i].driver_features = 0;
radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
}
return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
RADEON_IB_POOL_SIZE);
#endif
return 0;
}
 
static int radeon_debugfs_sa_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_debugfs_sa_list, 1);
#else
return 0;
#endif
/drivers/video/drm/radeon/radeon_sa.c
0,0 → 1,419
/*
* Copyright 2011 Red Hat Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors:
* Jerome Glisse <glisse@freedesktop.org>
*/
/* Algorithm:
*
* We store the last allocated bo in "hole", we always try to allocate
* after the last allocated bo. Principle is that in a linear GPU ring
* progression, what is after last is the oldest bo we allocated and thus
* the first one that should no longer be in use by the GPU.
*
* If that's not the case, we skip over the bo after last to the closest
* done bo, if one exists. If none exists and we are not asked to
* block, we report failure to allocate.
*
* If we are asked to block, we wait on the oldest fence of each of the
* rings; any one of those fences completing lets us retry.
*/
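 
The fit test at the heart of this allocator is plain offset arithmetic on the
current hole; a standalone sketch with illustrative numbers:
 
#include <assert.h>
 
/* does an aligned request of 'size' bytes fit in the hole [soffset, eoffset)? */
static int hole_fits(unsigned soffset, unsigned eoffset,
		     unsigned size, unsigned align)
{
	unsigned wasted = (align - (soffset % align)) % align;	/* alignment padding */
	return (eoffset - soffset) >= (size + wasted);
}
 
int main(void)
{
	assert(hole_fits(12, 64, 32, 8));	/* 4 bytes wasted, 52 available */
	assert(!hole_fits(12, 40, 32, 8));	/* 28 available < 36 needed */
	return 0;
}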
#include <drm/drmP.h>
#include "radeon.h"
 
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);
 
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
unsigned size, u32 domain)
{
int i, r;
 
init_waitqueue_head(&sa_manager->wq);
sa_manager->bo = NULL;
sa_manager->size = size;
sa_manager->domain = domain;
sa_manager->hole = &sa_manager->olist;
INIT_LIST_HEAD(&sa_manager->olist);
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
INIT_LIST_HEAD(&sa_manager->flist[i]);
}
 
r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
RADEON_GEM_DOMAIN_CPU, NULL, &sa_manager->bo);
if (r) {
dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
return r;
}
 
return r;
}
 
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager)
{
struct radeon_sa_bo *sa_bo, *tmp;
 
if (!list_empty(&sa_manager->olist)) {
sa_manager->hole = &sa_manager->olist,
radeon_sa_bo_try_free(sa_manager);
if (!list_empty(&sa_manager->olist)) {
dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
}
}
list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
radeon_sa_bo_remove_locked(sa_bo);
}
radeon_bo_unref(&sa_manager->bo);
sa_manager->size = 0;
}
 
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager)
{
int r;
 
if (sa_manager->bo == NULL) {
dev_err(rdev->dev, "no bo for sa manager\n");
return -EINVAL;
}
 
/* map the buffer */
r = radeon_bo_reserve(sa_manager->bo, false);
if (r) {
dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
return r;
}
r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
if (r) {
radeon_bo_unreserve(sa_manager->bo);
dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
return r;
}
r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
radeon_bo_unreserve(sa_manager->bo);
return r;
}
 
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager)
{
int r;
 
if (sa_manager->bo == NULL) {
dev_err(rdev->dev, "no bo for sa manager\n");
return -EINVAL;
}
 
r = radeon_bo_reserve(sa_manager->bo, false);
if (!r) {
radeon_bo_kunmap(sa_manager->bo);
radeon_bo_unpin(sa_manager->bo);
radeon_bo_unreserve(sa_manager->bo);
}
return r;
}
 
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
struct radeon_sa_manager *sa_manager = sa_bo->manager;
if (sa_manager->hole == &sa_bo->olist) {
sa_manager->hole = sa_bo->olist.prev;
}
list_del_init(&sa_bo->olist);
list_del_init(&sa_bo->flist);
radeon_fence_unref(&sa_bo->fence);
kfree(sa_bo);
}
 
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
struct radeon_sa_bo *sa_bo, *tmp;
 
if (sa_manager->hole->next == &sa_manager->olist)
return;
 
sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
return;
}
radeon_sa_bo_remove_locked(sa_bo);
}
}
 
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
 
if (hole != &sa_manager->olist) {
return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
}
return 0;
}
 
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
struct list_head *hole = sa_manager->hole;
 
if (hole->next != &sa_manager->olist) {
return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
}
return sa_manager->size;
}
 
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo *sa_bo,
unsigned size, unsigned align)
{
unsigned soffset, eoffset, wasted;
 
soffset = radeon_sa_bo_hole_soffset(sa_manager);
eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
wasted = (align - (soffset % align)) % align;
 
if ((eoffset - soffset) >= (size + wasted)) {
soffset += wasted;
 
sa_bo->manager = sa_manager;
sa_bo->soffset = soffset;
sa_bo->eoffset = soffset + size;
list_add(&sa_bo->olist, sa_manager->hole);
INIT_LIST_HEAD(&sa_bo->flist);
sa_manager->hole = &sa_bo->olist;
return true;
}
return false;
}
 
/**
* radeon_sa_event - Check if we can stop waiting
*
* @sa_manager: pointer to the sa_manager
* @size: number of bytes we want to allocate
* @align: alignment we need to match
*
* Check if either there is a fence we can wait for or
* enough free memory to satisfy the allocation directly
*/
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
unsigned size, unsigned align)
{
unsigned soffset, eoffset, wasted;
int i;
 
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
if (!list_empty(&sa_manager->flist[i])) {
return true;
}
}
 
soffset = radeon_sa_bo_hole_soffset(sa_manager);
eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
wasted = (align - (soffset % align)) % align;
 
if ((eoffset - soffset) >= (size + wasted)) {
return true;
}
 
return false;
}
 
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
struct radeon_fence **fences,
unsigned *tries)
{
struct radeon_sa_bo *best_bo = NULL;
unsigned i, soffset, best, tmp;
 
/* if hole points to the end of the buffer */
if (sa_manager->hole->next == &sa_manager->olist) {
/* try again with its beginning */
sa_manager->hole = &sa_manager->olist;
return true;
}
 
soffset = radeon_sa_bo_hole_soffset(sa_manager);
/* to handle wrap around we add sa_manager->size */
best = sa_manager->size * 2;
/* go over all the fence lists and try to find the sa_bo
* closest to the current last
*/
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
struct radeon_sa_bo *sa_bo;
 
if (list_empty(&sa_manager->flist[i])) {
continue;
}
 
sa_bo = list_first_entry(&sa_manager->flist[i],
struct radeon_sa_bo, flist);
 
if (!radeon_fence_signaled(sa_bo->fence)) {
fences[i] = sa_bo->fence;
continue;
}
 
/* limit the number of tries each ring gets */
if (tries[i] > 2) {
continue;
}
 
tmp = sa_bo->soffset;
if (tmp < soffset) {
/* wrap around, pretend it's after */
tmp += sa_manager->size;
}
tmp -= soffset;
if (tmp < best) {
/* this sa bo is the closest one */
best = tmp;
best_bo = sa_bo;
}
}
 
if (best_bo) {
++tries[best_bo->fence->ring];
sa_manager->hole = best_bo->olist.prev;
 
/* we know that this one is signaled,
so it's safe to remove it */
radeon_sa_bo_remove_locked(best_bo);
return true;
}
return false;
}
 
int radeon_sa_bo_new(struct radeon_device *rdev,
struct radeon_sa_manager *sa_manager,
struct radeon_sa_bo **sa_bo,
unsigned size, unsigned align, bool block)
{
struct radeon_fence *fences[RADEON_NUM_RINGS];
unsigned tries[RADEON_NUM_RINGS];
int i, r;
 
BUG_ON(align > RADEON_GPU_PAGE_SIZE);
BUG_ON(size > sa_manager->size);
 
*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
if ((*sa_bo) == NULL) {
return -ENOMEM;
}
(*sa_bo)->manager = sa_manager;
(*sa_bo)->fence = NULL;
INIT_LIST_HEAD(&(*sa_bo)->olist);
INIT_LIST_HEAD(&(*sa_bo)->flist);
 
spin_lock(&sa_manager->wq.lock);
do {
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
fences[i] = NULL;
tries[i] = 0;
}
 
do {
radeon_sa_bo_try_free(sa_manager);
 
if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
size, align)) {
spin_unlock(&sa_manager->wq.lock);
return 0;
}
 
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
 
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for, block */
if (r == -ENOENT && block) {
// r = wait_event_interruptible_locked(
// sa_manager->wq,
// radeon_sa_event(sa_manager, size, align)
// );
 
} else if (r == -ENOENT) {
r = -ENOMEM;
}
 
} while (!r);
 
spin_unlock(&sa_manager->wq.lock);
kfree(*sa_bo);
*sa_bo = NULL;
return r;
}
 
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
struct radeon_fence *fence)
{
struct radeon_sa_manager *sa_manager;
 
if (sa_bo == NULL || *sa_bo == NULL) {
return;
}
 
sa_manager = (*sa_bo)->manager;
spin_lock(&sa_manager->wq.lock);
if (fence && !radeon_fence_signaled(fence)) {
(*sa_bo)->fence = radeon_fence_ref(fence);
list_add_tail(&(*sa_bo)->flist,
&sa_manager->flist[fence->ring]);
} else {
radeon_sa_bo_remove_locked(*sa_bo);
}
// wake_up_all_locked(&sa_manager->wq);
spin_unlock(&sa_manager->wq.lock);
*sa_bo = NULL;
}
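 
A hedged sketch of the intended new/free pairing: the allocation is handed
back tagged with the fence that protects it, and the manager recycles it once
that fence signals. The size and alignment are illustrative.
 
static int example_sa_usage(struct radeon_device *rdev,
			    struct radeon_fence *fence)
{
	struct radeon_sa_bo *sa_bo;
	int r;
 
	r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &sa_bo, 256, 8, true);
	if (r)
		return r;
	/* ... emit commands referencing radeon_sa_bo_gpu_addr(sa_bo) ... */
	radeon_sa_bo_free(rdev, &sa_bo, fence);	/* freed once fence signals */
	return 0;
}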
 
#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
struct seq_file *m)
{
struct radeon_sa_bo *i;
 
spin_lock(&sa_manager->wq.lock);
list_for_each_entry(i, &sa_manager->olist, olist) {
if (&i->olist == sa_manager->hole) {
seq_printf(m, ">");
} else {
seq_printf(m, " ");
}
seq_printf(m, "[0x%08x 0x%08x] size %8d",
i->soffset, i->eoffset, i->eoffset - i->soffset);
if (i->fence) {
seq_printf(m, " protected by 0x%016llx on ring %d",
i->fence->seq, i->fence->ring);
}
seq_printf(m, "\n");
}
spin_unlock(&sa_manager->wq.lock);
}
#endif
/drivers/video/drm/radeon/radeon_semaphore.c
0,0 → 1,115
/*
* Copyright 2011 Christian König.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors:
* Christian König <deathsimple@vodafone.de>
*/
#include <drm/drmP.h>
#include "radeon.h"
 
 
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
int r;
 
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
&(*semaphore)->sa_bo, 8, 8, true);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
return r;
}
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
*((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
return 0;
}
 
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
--semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}
 
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore)
{
++semaphore->waiters;
radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}
 
/* caller must hold ring lock */
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int signaler, int waiter)
{
int r;
 
/* no need to signal and wait on the same ring */
if (signaler == waiter) {
return 0;
}
 
/* prevent GPU deadlocks */
if (!rdev->ring[signaler].ready) {
dev_err(rdev->dev, "Trying to sync to a disabled ring!");
return -EINVAL;
}
 
r = radeon_ring_alloc(rdev, &rdev->ring[signaler], 8);
if (r) {
return r;
}
radeon_semaphore_emit_signal(rdev, signaler, semaphore);
radeon_ring_commit(rdev, &rdev->ring[signaler]);
 
/* we assume the caller has already allocated space on the waiter's ring */
radeon_semaphore_emit_wait(rdev, waiter, semaphore);
 
return 0;
}
 
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence)
{
if (semaphore == NULL || *semaphore == NULL) {
return;
}
if ((*semaphore)->waiters > 0) {
dev_err(rdev->dev, "semaphore %p has more waiters than signalers,"
" hardware lockup imminent!\n", *semaphore);
}
radeon_sa_bo_free(rdev, &(*semaphore)->sa_bo, fence);
kfree(*semaphore);
*semaphore = NULL;
}
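 
A hedged end-to-end sketch of the API above: create a semaphore, order two
rings (GFX signals, CP1 waits), then queue it for recycling behind a fence.
Per the comments above, the caller is assumed to hold the ring lock and to
have reserved space on the waiting ring.
 
static int example_cross_ring_sync(struct radeon_device *rdev,
				   struct radeon_fence *fence)
{
	struct radeon_semaphore *sem;
	int r;
 
	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;
	r = radeon_semaphore_sync_rings(rdev, sem,
					RADEON_RING_TYPE_GFX_INDEX,	/* signaler */
					CAYMAN_RING_TYPE_CP1_INDEX);	/* waiter */
	radeon_semaphore_free(rdev, &sem, fence);	/* recycled after fence signals */
	return r;
}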
/drivers/video/drm/radeon/radeon_ttm.c
33,9 → 33,11
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"
 
57,12 → 59,12
/*
* Global memory.
*/
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
return ttm_mem_global_init(ref->object);
}
 
static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
ttm_mem_global_release(ref->object);
}
69,16 → 71,16
 
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
struct ttm_global_reference *global_ref;
struct drm_global_reference *global_ref;
int r;
 
rdev->mman.mem_global_referenced = false;
global_ref = &rdev->mman.mem_global_ref;
global_ref->global_type = TTM_GLOBAL_TTM_MEM;
global_ref->global_type = DRM_GLOBAL_TTM_MEM;
global_ref->size = sizeof(struct ttm_mem_global);
global_ref->init = &radeon_ttm_mem_global_init;
global_ref->release = &radeon_ttm_mem_global_release;
r = ttm_global_item_ref(global_ref);
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM memory accounting "
"subsystem.\n");
88,14 → 90,14
rdev->mman.bo_global_ref.mem_glob =
rdev->mman.mem_global_ref.object;
global_ref = &rdev->mman.bo_global_ref.ref;
global_ref->global_type = TTM_GLOBAL_TTM_BO;
global_ref->global_type = DRM_GLOBAL_TTM_BO;
global_ref->size = sizeof(struct ttm_bo_global);
global_ref->init = &ttm_bo_global_init;
global_ref->release = &ttm_bo_global_release;
r = ttm_global_item_ref(global_ref);
r = drm_global_item_ref(global_ref);
if (r != 0) {
DRM_ERROR("Failed setting up TTM BO subsystem.\n");
ttm_global_item_unref(&rdev->mman.mem_global_ref);
drm_global_item_unref(&rdev->mman.mem_global_ref);
return r;
}
 
104,9 → 106,11
}
 
 
struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
}
 
 
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
122,7 → 126,8
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
man->gpu_offset = rdev->mc.gtt_location;
man->func = &ttm_bo_manager_func;
man->gpu_offset = rdev->mc.gtt_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
133,34 → 138,22
(unsigned)type);
return -EINVAL;
}
man->io_offset = rdev->mc.agp_base;
man->io_size = rdev->mc.gtt_size;
man->io_addr = NULL;
if (!rdev->ddev->agp->cant_use_aperture)
man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
} else
}
#endif
{
man->io_offset = 0;
man->io_size = 0;
man->io_addr = NULL;
}
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
man->gpu_offset = rdev->mc.vram_location;
man->func = &ttm_bo_manager_func;
man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
man->io_addr = NULL;
man->io_offset = rdev->mc.aper_base;
man->io_size = rdev->mc.aper_size;
break;
default:
DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
/drivers/video/drm/radeon/rdisplay.c
33,7 → 33,7
rdev = (struct radeon_device *)rdisplay->ddev->dev_private;
 
r = radeon_bo_create(rdev, CURSOR_WIDTH*CURSOR_HEIGHT*4,
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, &cursor->robj);
PAGE_SIZE, false, RADEON_GEM_DOMAIN_VRAM, NULL, &cursor->robj);
 
if (unlikely(r != 0))
return r;
294,3 → 294,34
}
 
 
/* 23 bits of float fractional data */
#define I2F_FRAC_BITS 23
#define I2F_MASK ((1 << I2F_FRAC_BITS) - 1)
 
/*
* Converts unsigned integer into 32-bit IEEE floating point representation.
* Will be exact from 0 to 2^24. Above that, we round towards zero
* as the fractional bits will not fit in a float. (It would be better to
* round towards even as the fpu does, but that is slower.)
*/
__pure uint32_t int2float(uint32_t x)
{
uint32_t msb, exponent, fraction;
 
/* Zero is special */
if (!x) return 0;
 
/* Get location of the most significant bit */
msb = __fls(x);
 
/*
* Use a rotate instead of a shift because that works both leftwards
* and rightwards due to the mod(32) behaviour. This means we don't
* need to check to see if we are above 2^24 or not.
*/
fraction = ror32(x, (msb - I2F_FRAC_BITS) & 0x1f) & I2F_MASK;
exponent = (127 + msb) << I2F_FRAC_BITS;
 
return fraction + exponent;
}
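 
Worked example: for x = 5, msb = 2, so the exponent field is 127 + 2 = 129
(0x40800000 once shifted) and the rotated fraction is 0x200000, giving
0x40A00000, exactly the IEEE-754 encoding of 5.0f. A standalone check in
portable C, type-punning through memcpy instead of using the kernel helpers:
 
#include <assert.h>
#include <stdint.h>
#include <string.h>
 
int main(void)
{
	float f = 5.0f;
	uint32_t bits;
 
	memcpy(&bits, &f, sizeof(bits));
	assert(bits == 0x40A00000);	/* matches int2float(5) per the math above */
	return 0;
}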
 
/drivers/video/drm/radeon/rdisplay_kms.c
272,8 → 272,11
 
fb->width = reqmode->width;
fb->height = reqmode->height;
fb->pitch = radeon_align_pitch(dev->dev_private, reqmode->width, 32, false) * ((32 + 1) / 8);
 
fb->pitches[0] = fb->pitches[1] = fb->pitches[2] =
fb->pitches[3] = radeon_align_pitch(dev->dev_private, reqmode->width, 32, false) * ((32 + 1) / 8);
fb->bits_per_pixel = 32;
fb->depth = 24;
 
crtc->fb = fb;
crtc->enabled = true;
288,13 → 291,13
{
rdisplay->width = fb->width;
rdisplay->height = fb->height;
rdisplay->pitch = fb->pitch;
rdisplay->pitch = fb->pitches[0];
rdisplay->vrefresh = drm_mode_vrefresh(mode);
 
sysSetScreen(fb->width, fb->height, fb->pitch);
sysSetScreen(fb->width, fb->height, fb->pitches[0]);
 
dbgprintf("new mode %d x %d pitch %d\n",
fb->width, fb->height, fb->pitch);
fb->width, fb->height, fb->pitches[0]);
}
else
DRM_ERROR("failed to set mode %d_%d on crtc %p\n",
363,6 → 366,14
{
struct drm_device *dev;
 
struct drm_connector *connector;
struct drm_connector_helper_funcs *connector_funcs;
struct drm_encoder *encoder;
struct drm_crtc *crtc = NULL;
struct drm_framebuffer *fb;
struct drm_display_mode *native = NULL;
 
 
cursor_t *cursor;
bool retval = false;
u32_t ifl;
374,62 → 385,117
 
ENTER();
 
rdisplay = GetDisplay();
dev = rdev->ddev;
 
dev = rdisplay->ddev = rdev->ddev;
list_for_each_entry(connector, &dev->mode_config.connector_list, head)
{
if( connector->status != connector_status_connected)
continue;
 
ifl = safe_cli();
connector_funcs = connector->helper_private;
encoder = connector_funcs->best_encoder(connector);
if( encoder == NULL)
{
list_for_each_entry(cursor, &rdisplay->cursors, list)
dbgprintf("CONNECTOR %x ID: %d no active encoders\n",
connector, connector->base.id);
continue;
}
connector->encoder = encoder;
 
dbgprintf("CONNECTOR %x ID: %d status %d encoder %x\n crtc %x\n",
connector, connector->base.id,
connector->status, connector->encoder,
encoder->crtc);
 
crtc = encoder->crtc;
break;
};
 
if(connector == NULL)
{
init_cursor(cursor);
dbgprintf("No active connectors!\n");
return -1;
};
 
{
struct drm_display_mode *tmp;
 
list_for_each_entry(tmp, &connector->modes, head) {
if (drm_mode_width(tmp) > 16384 ||
drm_mode_height(tmp) > 16384)
continue;
if (tmp->type & DRM_MODE_TYPE_PREFERRED)
{
native = tmp;
break;
};
safe_sti(ifl);
}
}
 
if( ASIC_IS_AVIVO(rdev) && native )
{
dbgprintf("native w %d h %d\n", native->hdisplay, native->vdisplay);
struct radeon_encoder *radeon_encoder = to_radeon_encoder(connector->encoder);
radeon_encoder->rmx_type = RMX_FULL;
radeon_encoder->native_mode = *native;
};
 
 
rfbdev = rdev->mode_info.rfbdev;
fb_helper = &rfbdev->helper;
if(crtc == NULL)
{
struct drm_crtc *tmp_crtc;
int crtc_mask = 1;
 
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head)
{
if (encoder->possible_crtcs & crtc_mask)
{
crtc = tmp_crtc;
encoder->crtc = crtc;
break;
};
crtc_mask <<= 1;
};
};
 
// for (i = 0; i < fb_helper->crtc_count; i++)
// {
struct drm_mode_set *mode_set = &fb_helper->crtc_info[0].mode_set;
struct drm_crtc *crtc;
struct drm_display_mode *mode;
if(crtc == NULL)
{
dbgprintf("No CRTC for encoder %d\n", encoder->base.id);
return -1;
};
 
crtc = mode_set->crtc;
 
// if (!crtc->enabled)
// continue;
dbgprintf("[Select CRTC:%d]\n", crtc->base.id);
 
mode = mode_set->mode;
 
dbgprintf("crtc %d width %d height %d vrefresh %d\n",
crtc->base.id,
drm_mode_width(mode), drm_mode_height(mode),
drm_mode_vrefresh(mode));
// }
// drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
 
rdisplay = GetDisplay();
rdisplay->ddev = dev;
rdisplay->connector = connector;
rdisplay->crtc = crtc;
 
rdisplay->connector = get_def_connector(dev);
if( rdisplay->connector == 0 )
rdisplay->supported_modes = count_connector_modes(connector);
 
 
 
ifl = safe_cli();
{
dbgprintf("no active connectors\n");
return false;
list_for_each_entry(cursor, &rdisplay->cursors, list)
{
init_cursor(cursor);
};
 
};
safe_sti(ifl);
 
rdisplay->crtc = rdisplay->connector->encoder->crtc = crtc;
 
rdisplay->supported_modes = count_connector_modes(rdisplay->connector);
 
dbgprintf("current mode %d x %d x %d\n",
rdisplay->width, rdisplay->height, rdisplay->vrefresh);
dbgprintf("user mode mode %d x %d x %d\n",
usermode->width, usermode->height, usermode->freq);
 
 
if( (usermode->width != 0) &&
(usermode->height != 0) &&
( (usermode->width != rdisplay->width) ||
439,6 → 505,13
 
retval = set_mode(dev, rdisplay->connector, usermode, false);
}
else
{
usermode->width = rdisplay->width;
usermode->height = rdisplay->height;
usermode->freq = 60;
retval = set_mode(dev, rdisplay->connector, usermode, false);
};
 
ifl = safe_cli();
{
464,7 → 537,7
{
int err = -1;
 
ENTER();
// ENTER();
 
dbgprintf("mode %x count %d\n", mode, *count);
 
497,7 → 570,7
*count = i;
err = 0;
};
LEAVE();
// LEAVE();
return err;
}
 
505,7 → 578,7
{
int err = -1;
 
ENTER();
// ENTER();
 
dbgprintf("width %d height %d vrefresh %d\n",
mode->width, mode->height, mode->freq);
521,14 → 594,13
err = 0;
};
 
LEAVE();
// LEAVE();
return err;
};
 
 
 
int radeonfb_create_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd *mode_cmd,
int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
539,21 → 611,29
int ret;
int aligned_size, size;
int height = mode_cmd->height;
u32 bpp, depth;
 
static struct radeon_bo kos_bo;
static struct drm_mm_node vm_node;
 
 
drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
/* need to align pitch with crtc limits */
mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
fb_tiled) * ((bpp + 1) / 8);
 
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
size = mode_cmd->pitch * height;
size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
 
 
ret = drm_gem_object_init(rdev->ddev, &kos_bo.gem_base, aligned_size);
if (unlikely(ret)) {
return ret;
printk(KERN_ERR "failed to allocate framebuffer (%d)\n",
aligned_size);
return -ENOMEM;
}
 
kos_bo.rdev = rdev;
569,10 → 649,13
if (fb_tiled)
tiling_flags = RADEON_TILING_MACRO;
 
if (tiling_flags) {
rbo->tiling_flags = tiling_flags | RADEON_TILING_SURFACE;
rbo->pitch = mode_cmd->pitch;
}
// if (tiling_flags) {
// ret = radeon_bo_set_tiling_flags(rbo,
// tiling_flags | RADEON_TILING_SURFACE,
// mode_cmd->pitches[0]);
// if (ret)
// dev_err(rdev->dev, "FB failed to set tiling flags\n");
// }
 
vm_node.size = 0xC00000 >> 12;
vm_node.start = 0;
584,8 → 667,6
rbo->kptr = (void*)0xFE000000;
rbo->pin_count = 1;
 
// if (fb_tiled)
// radeon_bo_check_tiling(rbo, 0, 0);
 
*gobj_p = gobj;
return 0;
592,5 → 673,3
}
 
 
 
 
/drivers/video/drm/radeon/reg_srcs/cayman
0,0 → 1,641
cayman 0x9400
0x0000802C GRBM_GFX_INDEX
0x000084FC CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x000088C4 VGT_CACHE_INVALIDATION
0x000088D4 VGT_GS_VERTEX_REUSE
0x00008958 VGT_PRIMITIVE_TYPE
0x0000895C VGT_INDEX_TYPE
0x00008970 VGT_NUM_INDICES
0x00008974 VGT_NUM_INSTANCES
0x00008990 VGT_COMPUTE_DIM_X
0x00008994 VGT_COMPUTE_DIM_Y
0x00008998 VGT_COMPUTE_DIM_Z
0x0000899C VGT_COMPUTE_START_X
0x000089A0 VGT_COMPUTE_START_Y
0x000089A4 VGT_COMPUTE_START_Z
0x000089A8 VGT_COMPUTE_INDEX
0x000089AC VGT_COMPUTE_THREAD_GOURP_SIZE
0x000089B0 VGT_HS_OFFCHIP_PARAM
0x00008A14 PA_CL_ENHANCE
0x00008A60 PA_SC_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
0x00008C00 SQ_CONFIG
0x00008C04 SQ_GPR_RESOURCE_MGMT_1
0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
0x00008DF8 SQ_CONST_MEM_BASE
0x00008E20 SQ_STATIC_THREAD_MGMT_1
0x00008E24 SQ_STATIC_THREAD_MGMT_2
0x00008E28 SQ_STATIC_THREAD_MGMT_3
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x00009508 TA_CNTL_AUX
0x00009830 DB_DEBUG
0x00009834 DB_DEBUG2
0x00009838 DB_DEBUG3
0x0000983C DB_DEBUG4
0x00009854 DB_WATERMARKS
0x0000A400 TD_PS_BORDER_COLOR_INDEX
0x0000A404 TD_PS_BORDER_COLOR_RED
0x0000A408 TD_PS_BORDER_COLOR_GREEN
0x0000A40C TD_PS_BORDER_COLOR_BLUE
0x0000A410 TD_PS_BORDER_COLOR_ALPHA
0x0000A414 TD_VS_BORDER_COLOR_INDEX
0x0000A418 TD_VS_BORDER_COLOR_RED
0x0000A41C TD_VS_BORDER_COLOR_GREEN
0x0000A420 TD_VS_BORDER_COLOR_BLUE
0x0000A424 TD_VS_BORDER_COLOR_ALPHA
0x0000A428 TD_GS_BORDER_COLOR_INDEX
0x0000A42C TD_GS_BORDER_COLOR_RED
0x0000A430 TD_GS_BORDER_COLOR_GREEN
0x0000A434 TD_GS_BORDER_COLOR_BLUE
0x0000A438 TD_GS_BORDER_COLOR_ALPHA
0x0000A43C TD_HS_BORDER_COLOR_INDEX
0x0000A440 TD_HS_BORDER_COLOR_RED
0x0000A444 TD_HS_BORDER_COLOR_GREEN
0x0000A448 TD_HS_BORDER_COLOR_BLUE
0x0000A44C TD_HS_BORDER_COLOR_ALPHA
0x0000A450 TD_LS_BORDER_COLOR_INDEX
0x0000A454 TD_LS_BORDER_COLOR_RED
0x0000A458 TD_LS_BORDER_COLOR_GREEN
0x0000A45C TD_LS_BORDER_COLOR_BLUE
0x0000A460 TD_LS_BORDER_COLOR_ALPHA
0x0000A464 TD_CS_BORDER_COLOR_INDEX
0x0000A468 TD_CS_BORDER_COLOR_RED
0x0000A46C TD_CS_BORDER_COLOR_GREEN
0x0000A470 TD_CS_BORDER_COLOR_BLUE
0x0000A474 TD_CS_BORDER_COLOR_ALPHA
0x00028000 DB_RENDER_CONTROL
0x00028004 DB_COUNT_CONTROL
0x0002800C DB_RENDER_OVERRIDE
0x00028010 DB_RENDER_OVERRIDE2
0x00028028 DB_STENCIL_CLEAR
0x0002802C DB_DEPTH_CLEAR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x00028034 PA_SC_SCREEN_SCISSOR_BR
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
0x00028200 PA_SC_WINDOW_OFFSET
0x00028204 PA_SC_WINDOW_SCISSOR_TL
0x00028208 PA_SC_WINDOW_SCISSOR_BR
0x0002820C PA_SC_CLIPRECT_RULE
0x00028210 PA_SC_CLIPRECT_0_TL
0x00028214 PA_SC_CLIPRECT_0_BR
0x00028218 PA_SC_CLIPRECT_1_TL
0x0002821C PA_SC_CLIPRECT_1_BR
0x00028220 PA_SC_CLIPRECT_2_TL
0x00028224 PA_SC_CLIPRECT_2_BR
0x00028228 PA_SC_CLIPRECT_3_TL
0x0002822C PA_SC_CLIPRECT_3_BR
0x00028230 PA_SC_EDGERULE
0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
0x00028240 PA_SC_GENERIC_SCISSOR_TL
0x00028244 PA_SC_GENERIC_SCISSOR_BR
0x00028250 PA_SC_VPORT_SCISSOR_0_TL
0x00028254 PA_SC_VPORT_SCISSOR_0_BR
0x00028258 PA_SC_VPORT_SCISSOR_1_TL
0x0002825C PA_SC_VPORT_SCISSOR_1_BR
0x00028260 PA_SC_VPORT_SCISSOR_2_TL
0x00028264 PA_SC_VPORT_SCISSOR_2_BR
0x00028268 PA_SC_VPORT_SCISSOR_3_TL
0x0002826C PA_SC_VPORT_SCISSOR_3_BR
0x00028270 PA_SC_VPORT_SCISSOR_4_TL
0x00028274 PA_SC_VPORT_SCISSOR_4_BR
0x00028278 PA_SC_VPORT_SCISSOR_5_TL
0x0002827C PA_SC_VPORT_SCISSOR_5_BR
0x00028280 PA_SC_VPORT_SCISSOR_6_TL
0x00028284 PA_SC_VPORT_SCISSOR_6_BR
0x00028288 PA_SC_VPORT_SCISSOR_7_TL
0x0002828C PA_SC_VPORT_SCISSOR_7_BR
0x00028290 PA_SC_VPORT_SCISSOR_8_TL
0x00028294 PA_SC_VPORT_SCISSOR_8_BR
0x00028298 PA_SC_VPORT_SCISSOR_9_TL
0x0002829C PA_SC_VPORT_SCISSOR_9_BR
0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
0x000282AC PA_SC_VPORT_SCISSOR_11_BR
0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
0x000282BC PA_SC_VPORT_SCISSOR_13_BR
0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
0x000282CC PA_SC_VPORT_SCISSOR_15_BR
0x000282D0 PA_SC_VPORT_ZMIN_0
0x000282D4 PA_SC_VPORT_ZMAX_0
0x000282D8 PA_SC_VPORT_ZMIN_1
0x000282DC PA_SC_VPORT_ZMAX_1
0x000282E0 PA_SC_VPORT_ZMIN_2
0x000282E4 PA_SC_VPORT_ZMAX_2
0x000282E8 PA_SC_VPORT_ZMIN_3
0x000282EC PA_SC_VPORT_ZMAX_3
0x000282F0 PA_SC_VPORT_ZMIN_4
0x000282F4 PA_SC_VPORT_ZMAX_4
0x000282F8 PA_SC_VPORT_ZMIN_5
0x000282FC PA_SC_VPORT_ZMAX_5
0x00028300 PA_SC_VPORT_ZMIN_6
0x00028304 PA_SC_VPORT_ZMAX_6
0x00028308 PA_SC_VPORT_ZMIN_7
0x0002830C PA_SC_VPORT_ZMAX_7
0x00028310 PA_SC_VPORT_ZMIN_8
0x00028314 PA_SC_VPORT_ZMAX_8
0x00028318 PA_SC_VPORT_ZMIN_9
0x0002831C PA_SC_VPORT_ZMAX_9
0x00028320 PA_SC_VPORT_ZMIN_10
0x00028324 PA_SC_VPORT_ZMAX_10
0x00028328 PA_SC_VPORT_ZMIN_11
0x0002832C PA_SC_VPORT_ZMAX_11
0x00028330 PA_SC_VPORT_ZMIN_12
0x00028334 PA_SC_VPORT_ZMAX_12
0x00028338 PA_SC_VPORT_ZMIN_13
0x0002833C PA_SC_VPORT_ZMAX_13
0x00028340 PA_SC_VPORT_ZMIN_14
0x00028344 PA_SC_VPORT_ZMAX_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028354 SX_SURFACE_SYNC
0x0002835C SX_SCATTER_EXPORT_SIZE
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
0x0002838C SQ_VTX_SEMANTIC_3
0x00028390 SQ_VTX_SEMANTIC_4
0x00028394 SQ_VTX_SEMANTIC_5
0x00028398 SQ_VTX_SEMANTIC_6
0x0002839C SQ_VTX_SEMANTIC_7
0x000283A0 SQ_VTX_SEMANTIC_8
0x000283A4 SQ_VTX_SEMANTIC_9
0x000283A8 SQ_VTX_SEMANTIC_10
0x000283AC SQ_VTX_SEMANTIC_11
0x000283B0 SQ_VTX_SEMANTIC_12
0x000283B4 SQ_VTX_SEMANTIC_13
0x000283B8 SQ_VTX_SEMANTIC_14
0x000283BC SQ_VTX_SEMANTIC_15
0x000283C0 SQ_VTX_SEMANTIC_16
0x000283C4 SQ_VTX_SEMANTIC_17
0x000283C8 SQ_VTX_SEMANTIC_18
0x000283CC SQ_VTX_SEMANTIC_19
0x000283D0 SQ_VTX_SEMANTIC_20
0x000283D4 SQ_VTX_SEMANTIC_21
0x000283D8 SQ_VTX_SEMANTIC_22
0x000283DC SQ_VTX_SEMANTIC_23
0x000283E0 SQ_VTX_SEMANTIC_24
0x000283E4 SQ_VTX_SEMANTIC_25
0x000283E8 SQ_VTX_SEMANTIC_26
0x000283EC SQ_VTX_SEMANTIC_27
0x000283F0 SQ_VTX_SEMANTIC_28
0x000283F4 SQ_VTX_SEMANTIC_29
0x000283F8 SQ_VTX_SEMANTIC_30
0x000283FC SQ_VTX_SEMANTIC_31
0x00028400 VGT_MAX_VTX_INDX
0x00028404 VGT_MIN_VTX_INDX
0x00028408 VGT_INDX_OFFSET
0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
0x00028410 SX_ALPHA_TEST_CONTROL
0x00028414 CB_BLEND_RED
0x00028418 CB_BLEND_GREEN
0x0002841C CB_BLEND_BLUE
0x00028420 CB_BLEND_ALPHA
0x00028430 DB_STENCILREFMASK
0x00028434 DB_STENCILREFMASK_BF
0x00028438 SX_ALPHA_REF
0x0002843C PA_CL_VPORT_XSCALE_0
0x00028440 PA_CL_VPORT_XOFFSET_0
0x00028444 PA_CL_VPORT_YSCALE_0
0x00028448 PA_CL_VPORT_YOFFSET_0
0x0002844C PA_CL_VPORT_ZSCALE_0
0x00028450 PA_CL_VPORT_ZOFFSET_0
0x00028454 PA_CL_VPORT_XSCALE_1
0x00028458 PA_CL_VPORT_XOFFSET_1
0x0002845C PA_CL_VPORT_YSCALE_1
0x00028460 PA_CL_VPORT_YOFFSET_1
0x00028464 PA_CL_VPORT_ZSCALE_1
0x00028468 PA_CL_VPORT_ZOFFSET_1
0x0002846C PA_CL_VPORT_XSCALE_2
0x00028470 PA_CL_VPORT_XOFFSET_2
0x00028474 PA_CL_VPORT_YSCALE_2
0x00028478 PA_CL_VPORT_YOFFSET_2
0x0002847C PA_CL_VPORT_ZSCALE_2
0x00028480 PA_CL_VPORT_ZOFFSET_2
0x00028484 PA_CL_VPORT_XSCALE_3
0x00028488 PA_CL_VPORT_XOFFSET_3
0x0002848C PA_CL_VPORT_YSCALE_3
0x00028490 PA_CL_VPORT_YOFFSET_3
0x00028494 PA_CL_VPORT_ZSCALE_3
0x00028498 PA_CL_VPORT_ZOFFSET_3
0x0002849C PA_CL_VPORT_XSCALE_4
0x000284A0 PA_CL_VPORT_XOFFSET_4
0x000284A4 PA_CL_VPORT_YSCALE_4
0x000284A8 PA_CL_VPORT_YOFFSET_4
0x000284AC PA_CL_VPORT_ZSCALE_4
0x000284B0 PA_CL_VPORT_ZOFFSET_4
0x000284B4 PA_CL_VPORT_XSCALE_5
0x000284B8 PA_CL_VPORT_XOFFSET_5
0x000284BC PA_CL_VPORT_YSCALE_5
0x000284C0 PA_CL_VPORT_YOFFSET_5
0x000284C4 PA_CL_VPORT_ZSCALE_5
0x000284C8 PA_CL_VPORT_ZOFFSET_5
0x000284CC PA_CL_VPORT_XSCALE_6
0x000284D0 PA_CL_VPORT_XOFFSET_6
0x000284D4 PA_CL_VPORT_YSCALE_6
0x000284D8 PA_CL_VPORT_YOFFSET_6
0x000284DC PA_CL_VPORT_ZSCALE_6
0x000284E0 PA_CL_VPORT_ZOFFSET_6
0x000284E4 PA_CL_VPORT_XSCALE_7
0x000284E8 PA_CL_VPORT_XOFFSET_7
0x000284EC PA_CL_VPORT_YSCALE_7
0x000284F0 PA_CL_VPORT_YOFFSET_7
0x000284F4 PA_CL_VPORT_ZSCALE_7
0x000284F8 PA_CL_VPORT_ZOFFSET_7
0x000284FC PA_CL_VPORT_XSCALE_8
0x00028500 PA_CL_VPORT_XOFFSET_8
0x00028504 PA_CL_VPORT_YSCALE_8
0x00028508 PA_CL_VPORT_YOFFSET_8
0x0002850C PA_CL_VPORT_ZSCALE_8
0x00028510 PA_CL_VPORT_ZOFFSET_8
0x00028514 PA_CL_VPORT_XSCALE_9
0x00028518 PA_CL_VPORT_XOFFSET_9
0x0002851C PA_CL_VPORT_YSCALE_9
0x00028520 PA_CL_VPORT_YOFFSET_9
0x00028524 PA_CL_VPORT_ZSCALE_9
0x00028528 PA_CL_VPORT_ZOFFSET_9
0x0002852C PA_CL_VPORT_XSCALE_10
0x00028530 PA_CL_VPORT_XOFFSET_10
0x00028534 PA_CL_VPORT_YSCALE_10
0x00028538 PA_CL_VPORT_YOFFSET_10
0x0002853C PA_CL_VPORT_ZSCALE_10
0x00028540 PA_CL_VPORT_ZOFFSET_10
0x00028544 PA_CL_VPORT_XSCALE_11
0x00028548 PA_CL_VPORT_XOFFSET_11
0x0002854C PA_CL_VPORT_YSCALE_11
0x00028550 PA_CL_VPORT_YOFFSET_11
0x00028554 PA_CL_VPORT_ZSCALE_11
0x00028558 PA_CL_VPORT_ZOFFSET_11
0x0002855C PA_CL_VPORT_XSCALE_12
0x00028560 PA_CL_VPORT_XOFFSET_12
0x00028564 PA_CL_VPORT_YSCALE_12
0x00028568 PA_CL_VPORT_YOFFSET_12
0x0002856C PA_CL_VPORT_ZSCALE_12
0x00028570 PA_CL_VPORT_ZOFFSET_12
0x00028574 PA_CL_VPORT_XSCALE_13
0x00028578 PA_CL_VPORT_XOFFSET_13
0x0002857C PA_CL_VPORT_YSCALE_13
0x00028580 PA_CL_VPORT_YOFFSET_13
0x00028584 PA_CL_VPORT_ZSCALE_13
0x00028588 PA_CL_VPORT_ZOFFSET_13
0x0002858C PA_CL_VPORT_XSCALE_14
0x00028590 PA_CL_VPORT_XOFFSET_14
0x00028594 PA_CL_VPORT_YSCALE_14
0x00028598 PA_CL_VPORT_YOFFSET_14
0x0002859C PA_CL_VPORT_ZSCALE_14
0x000285A0 PA_CL_VPORT_ZOFFSET_14
0x000285A4 PA_CL_VPORT_XSCALE_15
0x000285A8 PA_CL_VPORT_XOFFSET_15
0x000285AC PA_CL_VPORT_YSCALE_15
0x000285B0 PA_CL_VPORT_YOFFSET_15
0x000285B4 PA_CL_VPORT_ZSCALE_15
0x000285B8 PA_CL_VPORT_ZOFFSET_15
0x000285BC PA_CL_UCP_0_X
0x000285C0 PA_CL_UCP_0_Y
0x000285C4 PA_CL_UCP_0_Z
0x000285C8 PA_CL_UCP_0_W
0x000285CC PA_CL_UCP_1_X
0x000285D0 PA_CL_UCP_1_Y
0x000285D4 PA_CL_UCP_1_Z
0x000285D8 PA_CL_UCP_1_W
0x000285DC PA_CL_UCP_2_X
0x000285E0 PA_CL_UCP_2_Y
0x000285E4 PA_CL_UCP_2_Z
0x000285E8 PA_CL_UCP_2_W
0x000285EC PA_CL_UCP_3_X
0x000285F0 PA_CL_UCP_3_Y
0x000285F4 PA_CL_UCP_3_Z
0x000285F8 PA_CL_UCP_3_W
0x000285FC PA_CL_UCP_4_X
0x00028600 PA_CL_UCP_4_Y
0x00028604 PA_CL_UCP_4_Z
0x00028608 PA_CL_UCP_4_W
0x0002860C PA_CL_UCP_5_X
0x00028610 PA_CL_UCP_5_Y
0x00028614 PA_CL_UCP_5_Z
0x00028618 PA_CL_UCP_5_W
0x0002861C SPI_VS_OUT_ID_0
0x00028620 SPI_VS_OUT_ID_1
0x00028624 SPI_VS_OUT_ID_2
0x00028628 SPI_VS_OUT_ID_3
0x0002862C SPI_VS_OUT_ID_4
0x00028630 SPI_VS_OUT_ID_5
0x00028634 SPI_VS_OUT_ID_6
0x00028638 SPI_VS_OUT_ID_7
0x0002863C SPI_VS_OUT_ID_8
0x00028640 SPI_VS_OUT_ID_9
0x00028644 SPI_PS_INPUT_CNTL_0
0x00028648 SPI_PS_INPUT_CNTL_1
0x0002864C SPI_PS_INPUT_CNTL_2
0x00028650 SPI_PS_INPUT_CNTL_3
0x00028654 SPI_PS_INPUT_CNTL_4
0x00028658 SPI_PS_INPUT_CNTL_5
0x0002865C SPI_PS_INPUT_CNTL_6
0x00028660 SPI_PS_INPUT_CNTL_7
0x00028664 SPI_PS_INPUT_CNTL_8
0x00028668 SPI_PS_INPUT_CNTL_9
0x0002866C SPI_PS_INPUT_CNTL_10
0x00028670 SPI_PS_INPUT_CNTL_11
0x00028674 SPI_PS_INPUT_CNTL_12
0x00028678 SPI_PS_INPUT_CNTL_13
0x0002867C SPI_PS_INPUT_CNTL_14
0x00028680 SPI_PS_INPUT_CNTL_15
0x00028684 SPI_PS_INPUT_CNTL_16
0x00028688 SPI_PS_INPUT_CNTL_17
0x0002868C SPI_PS_INPUT_CNTL_18
0x00028690 SPI_PS_INPUT_CNTL_19
0x00028694 SPI_PS_INPUT_CNTL_20
0x00028698 SPI_PS_INPUT_CNTL_21
0x0002869C SPI_PS_INPUT_CNTL_22
0x000286A0 SPI_PS_INPUT_CNTL_23
0x000286A4 SPI_PS_INPUT_CNTL_24
0x000286A8 SPI_PS_INPUT_CNTL_25
0x000286AC SPI_PS_INPUT_CNTL_26
0x000286B0 SPI_PS_INPUT_CNTL_27
0x000286B4 SPI_PS_INPUT_CNTL_28
0x000286B8 SPI_PS_INPUT_CNTL_29
0x000286BC SPI_PS_INPUT_CNTL_30
0x000286C0 SPI_PS_INPUT_CNTL_31
0x000286C4 SPI_VS_OUT_CONFIG
0x000286C8 SPI_THREAD_GROUPING
0x000286CC SPI_PS_IN_CONTROL_0
0x000286D0 SPI_PS_IN_CONTROL_1
0x000286D4 SPI_INTERP_CONTROL_0
0x000286D8 SPI_INPUT_Z
0x000286DC SPI_FOG_CNTL
0x000286E0 SPI_BARYC_CNTL
0x000286E4 SPI_PS_IN_CONTROL_2
0x000286E8 SPI_COMPUTE_INPUT_CNTL
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
0x000286F8 SPI_GPR_MGMT
0x000286FC SPI_LDS_MGMT
0x00028700 SPI_STACK_MGMT
0x00028704 SPI_WAVE_MGMT_1
0x00028708 SPI_WAVE_MGMT_2
0x00028720 GDS_ADDR_BASE
0x00028724 GDS_ADDR_SIZE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
0x0002878C CB_BLEND3_CONTROL
0x00028790 CB_BLEND4_CONTROL
0x00028794 CB_BLEND5_CONTROL
0x00028798 CB_BLEND6_CONTROL
0x0002879C CB_BLEND7_CONTROL
0x000287CC CS_COPY_STATE
0x000287D0 GFX_COPY_STATE
0x000287D4 PA_CL_POINT_X_RAD
0x000287D8 PA_CL_POINT_Y_RAD
0x000287DC PA_CL_POINT_SIZE
0x000287E0 PA_CL_POINT_CULL_RAD
0x00028808 CB_COLOR_CONTROL
0x0002880C DB_SHADER_CONTROL
0x00028810 PA_CL_CLIP_CNTL
0x00028814 PA_SU_SC_MODE_CNTL
0x00028818 PA_CL_VTE_CNTL
0x0002881C PA_CL_VS_OUT_CNTL
0x00028820 PA_CL_NANINF_CNTL
0x00028824 PA_SU_LINE_STIPPLE_CNTL
0x00028828 PA_SU_LINE_STIPPLE_SCALE
0x0002882C PA_SU_PRIM_FILTER_CNTL
0x00028844 SQ_PGM_RESOURCES_PS
0x00028848 SQ_PGM_RESOURCES_2_PS
0x0002884C SQ_PGM_EXPORTS_PS
0x00028860 SQ_PGM_RESOURCES_VS
0x00028864 SQ_PGM_RESOURCES_2_VS
0x00028878 SQ_PGM_RESOURCES_GS
0x0002887C SQ_PGM_RESOURCES_2_GS
0x00028890 SQ_PGM_RESOURCES_ES
0x00028894 SQ_PGM_RESOURCES_2_ES
0x000288A8 SQ_PGM_RESOURCES_FS
0x000288BC SQ_PGM_RESOURCES_HS
0x000288C0 SQ_PGM_RESOURCES_2_HS
0x000288D4 SQ_PGM_RESOURCES_LS
0x000288D8 SQ_PGM_RESOURCES_2_LS
0x000288E8 SQ_LDS_ALLOC
0x000288EC SQ_LDS_ALLOC_PS
0x000288F0 SQ_VTX_SEMANTIC_CLEAR
0x00028A00 PA_SU_POINT_SIZE
0x00028A04 PA_SU_POINT_MINMAX
0x00028A08 PA_SU_LINE_CNTL
0x00028A0C PA_SC_LINE_STIPPLE
0x00028A10 VGT_OUTPUT_PATH_CNTL
0x00028A14 VGT_HOS_CNTL
0x00028A18 VGT_HOS_MAX_TESS_LEVEL
0x00028A1C VGT_HOS_MIN_TESS_LEVEL
0x00028A20 VGT_HOS_REUSE_DEPTH
0x00028A24 VGT_GROUP_PRIM_TYPE
0x00028A28 VGT_GROUP_FIRST_DECR
0x00028A2C VGT_GROUP_DECR
0x00028A30 VGT_GROUP_VECT_0_CNTL
0x00028A34 VGT_GROUP_VECT_1_CNTL
0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A48 PA_SC_MODE_CNTL_0
0x00028A4C PA_SC_MODE_CNTL_1
0x00028A50 VGT_ENHANCE
0x00028A54 VGT_GS_PER_ES
0x00028A58 VGT_ES_PER_GS
0x00028A5C VGT_GS_PER_VS
0x00028A6C VGT_GS_OUT_PRIM_TYPE
0x00028A70 IA_ENHANCE
0x00028A84 VGT_PRIMITIVEID_EN
0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
0x00028AA0 VGT_INSTANCE_STEP_RATE_0
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
0x00028AA8 IA_MULTI_VGT_PARAM
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028B38 VGT_GS_MAX_VERT_OUT
0x00028B54 VGT_SHADER_STAGES_EN
0x00028B58 VGT_LS_HS_CONFIG
0x00028B6C VGT_TF_PARAM
0x00028B70 DB_ALPHA_TO_MASK
0x00028B74 VGT_DISPATCH_INITIATOR
0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
0x00028B7C PA_SU_POLY_OFFSET_CLAMP
0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028B90 VGT_GS_INSTANCE_CNT
0x00028BD4 PA_SC_CENTROID_PRIORITY_0
0x00028BD8 PA_SC_CENTROID_PRIORITY_1
0x00028BDC PA_SC_LINE_CNTL
0x00028BE4 PA_SU_VTX_CNTL
0x00028BE8 PA_CL_GB_VERT_CLIP_ADJ
0x00028BEC PA_CL_GB_VERT_DISC_ADJ
0x00028BF0 PA_CL_GB_HORZ_CLIP_ADJ
0x00028BF4 PA_CL_GB_HORZ_DISC_ADJ
0x00028BF8 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_0
0x00028BFC PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_1
0x00028C00 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_2
0x00028C04 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y0_3
0x00028C08 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_0
0x00028C0C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_1
0x00028C10 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_2
0x00028C14 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y0_3
0x00028C18 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_0
0x00028C1C PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_1
0x00028C20 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_2
0x00028C24 PA_SC_AA_SAMPLE_LOCS_PIXEL_X0_Y1_3
0x00028C28 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_0
0x00028C2C PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_1
0x00028C30 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_2
0x00028C34 PA_SC_AA_SAMPLE_LOCS_PIXEL_X1_Y1_3
0x00028C38 PA_SC_AA_MASK_X0_Y0_X1_Y0
0x00028C3C PA_SC_AA_MASK_X0_Y1_X1_Y1
0x00028C78 CB_COLOR0_DIM
0x00028CB4 CB_COLOR1_DIM
0x00028CF0 CB_COLOR2_DIM
0x00028D2C CB_COLOR3_DIM
0x00028D68 CB_COLOR4_DIM
0x00028DA4 CB_COLOR5_DIM
0x00028DE0 CB_COLOR6_DIM
0x00028E1C CB_COLOR7_DIM
0x00028E58 CB_COLOR8_DIM
0x00028E74 CB_COLOR9_DIM
0x00028E90 CB_COLOR10_DIM
0x00028EAC CB_COLOR11_DIM
0x00028C8C CB_COLOR0_CLEAR_WORD0
0x00028C90 CB_COLOR0_CLEAR_WORD1
0x00028C94 CB_COLOR0_CLEAR_WORD2
0x00028C98 CB_COLOR0_CLEAR_WORD3
0x00028CC8 CB_COLOR1_CLEAR_WORD0
0x00028CCC CB_COLOR1_CLEAR_WORD1
0x00028CD0 CB_COLOR1_CLEAR_WORD2
0x00028CD4 CB_COLOR1_CLEAR_WORD3
0x00028D04 CB_COLOR2_CLEAR_WORD0
0x00028D08 CB_COLOR2_CLEAR_WORD1
0x00028D0C CB_COLOR2_CLEAR_WORD2
0x00028D10 CB_COLOR2_CLEAR_WORD3
0x00028D40 CB_COLOR3_CLEAR_WORD0
0x00028D44 CB_COLOR3_CLEAR_WORD1
0x00028D48 CB_COLOR3_CLEAR_WORD2
0x00028D4C CB_COLOR3_CLEAR_WORD3
0x00028D7C CB_COLOR4_CLEAR_WORD0
0x00028D80 CB_COLOR4_CLEAR_WORD1
0x00028D84 CB_COLOR4_CLEAR_WORD2
0x00028D88 CB_COLOR4_CLEAR_WORD3
0x00028DB8 CB_COLOR5_CLEAR_WORD0
0x00028DBC CB_COLOR5_CLEAR_WORD1
0x00028DC0 CB_COLOR5_CLEAR_WORD2
0x00028DC4 CB_COLOR5_CLEAR_WORD3
0x00028DF4 CB_COLOR6_CLEAR_WORD0
0x00028DF8 CB_COLOR6_CLEAR_WORD1
0x00028DFC CB_COLOR6_CLEAR_WORD2
0x00028E00 CB_COLOR6_CLEAR_WORD3
0x00028E30 CB_COLOR7_CLEAR_WORD0
0x00028E34 CB_COLOR7_CLEAR_WORD1
0x00028E38 CB_COLOR7_CLEAR_WORD2
0x00028E3C CB_COLOR7_CLEAR_WORD3
0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
0x0003CFF0 SQ_VTX_BASE_VTX_LOC
0x0003CFF4 SQ_VTX_START_INST_LOC
0x0003FF00 SQ_TEX_SAMPLER_CLEAR
0x0003FF04 SQ_TEX_RESOURCE_CLEAR
0x0003FF08 SQ_LOOP_BOOL_CLEAR
/drivers/video/drm/radeon/reg_srcs/evergreen
0,0 → 1,644
evergreen 0x9400
0x0000802C GRBM_GFX_INDEX
0x00008040 WAIT_UNTIL
0x00008044 WAIT_UNTIL_POLL_CNTL
0x00008048 WAIT_UNTIL_POLL_MASK
0x0000804c WAIT_UNTIL_POLL_REFDATA
0x000084FC CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x000088C4 VGT_CACHE_INVALIDATION
0x000088D4 VGT_GS_VERTEX_REUSE
0x00008958 VGT_PRIMITIVE_TYPE
0x0000895C VGT_INDEX_TYPE
0x00008970 VGT_NUM_INDICES
0x00008974 VGT_NUM_INSTANCES
0x00008990 VGT_COMPUTE_DIM_X
0x00008994 VGT_COMPUTE_DIM_Y
0x00008998 VGT_COMPUTE_DIM_Z
0x0000899C VGT_COMPUTE_START_X
0x000089A0 VGT_COMPUTE_START_Y
0x000089A4 VGT_COMPUTE_START_Z
0x000089AC VGT_COMPUTE_THREAD_GROUP_SIZE
0x00008A14 PA_CL_ENHANCE
0x00008A60 PA_SC_LINE_STIPPLE_VALUE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00008BF0 PA_SC_ENHANCE
0x00008D8C SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
0x00008D90 SQ_DYN_GPR_OPTIMIZATION
0x00008D94 SQ_DYN_GPR_SIMD_LOCK_EN
0x00008D98 SQ_DYN_GPR_THREAD_LIMIT
0x00008D9C SQ_DYN_GPR_LDS_LIMIT
0x00008C00 SQ_CONFIG
0x00008C04 SQ_GPR_RESOURCE_MGMT_1
0x00008C08 SQ_GPR_RESOURCE_MGMT_2
0x00008C0C SQ_GPR_RESOURCE_MGMT_3
0x00008C10 SQ_GLOBAL_GPR_RESOURCE_MGMT_1
0x00008C14 SQ_GLOBAL_GPR_RESOURCE_MGMT_2
0x00008C18 SQ_THREAD_RESOURCE_MGMT
0x00008C1C SQ_THREAD_RESOURCE_MGMT_2
0x00008C20 SQ_STACK_RESOURCE_MGMT_1
0x00008C24 SQ_STACK_RESOURCE_MGMT_2
0x00008C28 SQ_STACK_RESOURCE_MGMT_3
0x00008DF8 SQ_CONST_MEM_BASE
0x00008E20 SQ_STATIC_THREAD_MGMT_1
0x00008E24 SQ_STATIC_THREAD_MGMT_2
0x00008E28 SQ_STATIC_THREAD_MGMT_3
0x00008E2C SQ_LDS_RESOURCE_MGMT
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
0x00009014 SX_MEMORY_EXPORT_SIZE
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x00009508 TA_CNTL_AUX
0x00009700 VC_CNTL
0x00009714 VC_ENHANCE
0x00009830 DB_DEBUG
0x00009834 DB_DEBUG2
0x00009838 DB_DEBUG3
0x0000983C DB_DEBUG4
0x00009854 DB_WATERMARKS
0x0000A400 TD_PS_BORDER_COLOR_INDEX
0x0000A404 TD_PS_BORDER_COLOR_RED
0x0000A408 TD_PS_BORDER_COLOR_GREEN
0x0000A40C TD_PS_BORDER_COLOR_BLUE
0x0000A410 TD_PS_BORDER_COLOR_ALPHA
0x0000A414 TD_VS_BORDER_COLOR_INDEX
0x0000A418 TD_VS_BORDER_COLOR_RED
0x0000A41C TD_VS_BORDER_COLOR_GREEN
0x0000A420 TD_VS_BORDER_COLOR_BLUE
0x0000A424 TD_VS_BORDER_COLOR_ALPHA
0x0000A428 TD_GS_BORDER_COLOR_INDEX
0x0000A42C TD_GS_BORDER_COLOR_RED
0x0000A430 TD_GS_BORDER_COLOR_GREEN
0x0000A434 TD_GS_BORDER_COLOR_BLUE
0x0000A438 TD_GS_BORDER_COLOR_ALPHA
0x0000A43C TD_HS_BORDER_COLOR_INDEX
0x0000A440 TD_HS_BORDER_COLOR_RED
0x0000A444 TD_HS_BORDER_COLOR_GREEN
0x0000A448 TD_HS_BORDER_COLOR_BLUE
0x0000A44C TD_HS_BORDER_COLOR_ALPHA
0x0000A450 TD_LS_BORDER_COLOR_INDEX
0x0000A454 TD_LS_BORDER_COLOR_RED
0x0000A458 TD_LS_BORDER_COLOR_GREEN
0x0000A45C TD_LS_BORDER_COLOR_BLUE
0x0000A460 TD_LS_BORDER_COLOR_ALPHA
0x0000A464 TD_CS_BORDER_COLOR_INDEX
0x0000A468 TD_CS_BORDER_COLOR_RED
0x0000A46C TD_CS_BORDER_COLOR_GREEN
0x0000A470 TD_CS_BORDER_COLOR_BLUE
0x0000A474 TD_CS_BORDER_COLOR_ALPHA
0x00028000 DB_RENDER_CONTROL
0x00028004 DB_COUNT_CONTROL
0x0002800C DB_RENDER_OVERRIDE
0x00028010 DB_RENDER_OVERRIDE2
0x00028028 DB_STENCIL_CLEAR
0x0002802C DB_DEPTH_CLEAR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x00028034 PA_SC_SCREEN_SCISSOR_BR
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
0x00028200 PA_SC_WINDOW_OFFSET
0x00028204 PA_SC_WINDOW_SCISSOR_TL
0x00028208 PA_SC_WINDOW_SCISSOR_BR
0x0002820C PA_SC_CLIPRECT_RULE
0x00028210 PA_SC_CLIPRECT_0_TL
0x00028214 PA_SC_CLIPRECT_0_BR
0x00028218 PA_SC_CLIPRECT_1_TL
0x0002821C PA_SC_CLIPRECT_1_BR
0x00028220 PA_SC_CLIPRECT_2_TL
0x00028224 PA_SC_CLIPRECT_2_BR
0x00028228 PA_SC_CLIPRECT_3_TL
0x0002822C PA_SC_CLIPRECT_3_BR
0x00028230 PA_SC_EDGERULE
0x00028234 PA_SU_HARDWARE_SCREEN_OFFSET
0x00028240 PA_SC_GENERIC_SCISSOR_TL
0x00028244 PA_SC_GENERIC_SCISSOR_BR
0x00028250 PA_SC_VPORT_SCISSOR_0_TL
0x00028254 PA_SC_VPORT_SCISSOR_0_BR
0x00028258 PA_SC_VPORT_SCISSOR_1_TL
0x0002825C PA_SC_VPORT_SCISSOR_1_BR
0x00028260 PA_SC_VPORT_SCISSOR_2_TL
0x00028264 PA_SC_VPORT_SCISSOR_2_BR
0x00028268 PA_SC_VPORT_SCISSOR_3_TL
0x0002826C PA_SC_VPORT_SCISSOR_3_BR
0x00028270 PA_SC_VPORT_SCISSOR_4_TL
0x00028274 PA_SC_VPORT_SCISSOR_4_BR
0x00028278 PA_SC_VPORT_SCISSOR_5_TL
0x0002827C PA_SC_VPORT_SCISSOR_5_BR
0x00028280 PA_SC_VPORT_SCISSOR_6_TL
0x00028284 PA_SC_VPORT_SCISSOR_6_BR
0x00028288 PA_SC_VPORT_SCISSOR_7_TL
0x0002828C PA_SC_VPORT_SCISSOR_7_BR
0x00028290 PA_SC_VPORT_SCISSOR_8_TL
0x00028294 PA_SC_VPORT_SCISSOR_8_BR
0x00028298 PA_SC_VPORT_SCISSOR_9_TL
0x0002829C PA_SC_VPORT_SCISSOR_9_BR
0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
0x000282AC PA_SC_VPORT_SCISSOR_11_BR
0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
0x000282BC PA_SC_VPORT_SCISSOR_13_BR
0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
0x000282CC PA_SC_VPORT_SCISSOR_15_BR
0x000282D0 PA_SC_VPORT_ZMIN_0
0x000282D4 PA_SC_VPORT_ZMAX_0
0x000282D8 PA_SC_VPORT_ZMIN_1
0x000282DC PA_SC_VPORT_ZMAX_1
0x000282E0 PA_SC_VPORT_ZMIN_2
0x000282E4 PA_SC_VPORT_ZMAX_2
0x000282E8 PA_SC_VPORT_ZMIN_3
0x000282EC PA_SC_VPORT_ZMAX_3
0x000282F0 PA_SC_VPORT_ZMIN_4
0x000282F4 PA_SC_VPORT_ZMAX_4
0x000282F8 PA_SC_VPORT_ZMIN_5
0x000282FC PA_SC_VPORT_ZMAX_5
0x00028300 PA_SC_VPORT_ZMIN_6
0x00028304 PA_SC_VPORT_ZMAX_6
0x00028308 PA_SC_VPORT_ZMIN_7
0x0002830C PA_SC_VPORT_ZMAX_7
0x00028310 PA_SC_VPORT_ZMIN_8
0x00028314 PA_SC_VPORT_ZMAX_8
0x00028318 PA_SC_VPORT_ZMIN_9
0x0002831C PA_SC_VPORT_ZMAX_9
0x00028320 PA_SC_VPORT_ZMIN_10
0x00028324 PA_SC_VPORT_ZMAX_10
0x00028328 PA_SC_VPORT_ZMIN_11
0x0002832C PA_SC_VPORT_ZMAX_11
0x00028330 PA_SC_VPORT_ZMIN_12
0x00028334 PA_SC_VPORT_ZMAX_12
0x00028338 PA_SC_VPORT_ZMIN_13
0x0002833C PA_SC_VPORT_ZMAX_13
0x00028340 PA_SC_VPORT_ZMIN_14
0x00028344 PA_SC_VPORT_ZMAX_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x0002834C PA_SC_VPORT_ZMAX_15
0x00028354 SX_SURFACE_SYNC
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
0x0002838C SQ_VTX_SEMANTIC_3
0x00028390 SQ_VTX_SEMANTIC_4
0x00028394 SQ_VTX_SEMANTIC_5
0x00028398 SQ_VTX_SEMANTIC_6
0x0002839C SQ_VTX_SEMANTIC_7
0x000283A0 SQ_VTX_SEMANTIC_8
0x000283A4 SQ_VTX_SEMANTIC_9
0x000283A8 SQ_VTX_SEMANTIC_10
0x000283AC SQ_VTX_SEMANTIC_11
0x000283B0 SQ_VTX_SEMANTIC_12
0x000283B4 SQ_VTX_SEMANTIC_13
0x000283B8 SQ_VTX_SEMANTIC_14
0x000283BC SQ_VTX_SEMANTIC_15
0x000283C0 SQ_VTX_SEMANTIC_16
0x000283C4 SQ_VTX_SEMANTIC_17
0x000283C8 SQ_VTX_SEMANTIC_18
0x000283CC SQ_VTX_SEMANTIC_19
0x000283D0 SQ_VTX_SEMANTIC_20
0x000283D4 SQ_VTX_SEMANTIC_21
0x000283D8 SQ_VTX_SEMANTIC_22
0x000283DC SQ_VTX_SEMANTIC_23
0x000283E0 SQ_VTX_SEMANTIC_24
0x000283E4 SQ_VTX_SEMANTIC_25
0x000283E8 SQ_VTX_SEMANTIC_26
0x000283EC SQ_VTX_SEMANTIC_27
0x000283F0 SQ_VTX_SEMANTIC_28
0x000283F4 SQ_VTX_SEMANTIC_29
0x000283F8 SQ_VTX_SEMANTIC_30
0x000283FC SQ_VTX_SEMANTIC_31
0x00028400 VGT_MAX_VTX_INDX
0x00028404 VGT_MIN_VTX_INDX
0x00028408 VGT_INDX_OFFSET
0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
0x00028410 SX_ALPHA_TEST_CONTROL
0x00028414 CB_BLEND_RED
0x00028418 CB_BLEND_GREEN
0x0002841C CB_BLEND_BLUE
0x00028420 CB_BLEND_ALPHA
0x00028430 DB_STENCILREFMASK
0x00028434 DB_STENCILREFMASK_BF
0x00028438 SX_ALPHA_REF
0x0002843C PA_CL_VPORT_XSCALE_0
0x00028440 PA_CL_VPORT_XOFFSET_0
0x00028444 PA_CL_VPORT_YSCALE_0
0x00028448 PA_CL_VPORT_YOFFSET_0
0x0002844C PA_CL_VPORT_ZSCALE_0
0x00028450 PA_CL_VPORT_ZOFFSET_0
0x00028454 PA_CL_VPORT_XSCALE_1
0x00028458 PA_CL_VPORT_XOFFSET_1
0x0002845C PA_CL_VPORT_YSCALE_1
0x00028460 PA_CL_VPORT_YOFFSET_1
0x00028464 PA_CL_VPORT_ZSCALE_1
0x00028468 PA_CL_VPORT_ZOFFSET_1
0x0002846C PA_CL_VPORT_XSCALE_2
0x00028470 PA_CL_VPORT_XOFFSET_2
0x00028474 PA_CL_VPORT_YSCALE_2
0x00028478 PA_CL_VPORT_YOFFSET_2
0x0002847C PA_CL_VPORT_ZSCALE_2
0x00028480 PA_CL_VPORT_ZOFFSET_2
0x00028484 PA_CL_VPORT_XSCALE_3
0x00028488 PA_CL_VPORT_XOFFSET_3
0x0002848C PA_CL_VPORT_YSCALE_3
0x00028490 PA_CL_VPORT_YOFFSET_3
0x00028494 PA_CL_VPORT_ZSCALE_3
0x00028498 PA_CL_VPORT_ZOFFSET_3
0x0002849C PA_CL_VPORT_XSCALE_4
0x000284A0 PA_CL_VPORT_XOFFSET_4
0x000284A4 PA_CL_VPORT_YSCALE_4
0x000284A8 PA_CL_VPORT_YOFFSET_4
0x000284AC PA_CL_VPORT_ZSCALE_4
0x000284B0 PA_CL_VPORT_ZOFFSET_4
0x000284B4 PA_CL_VPORT_XSCALE_5
0x000284B8 PA_CL_VPORT_XOFFSET_5
0x000284BC PA_CL_VPORT_YSCALE_5
0x000284C0 PA_CL_VPORT_YOFFSET_5
0x000284C4 PA_CL_VPORT_ZSCALE_5
0x000284C8 PA_CL_VPORT_ZOFFSET_5
0x000284CC PA_CL_VPORT_XSCALE_6
0x000284D0 PA_CL_VPORT_XOFFSET_6
0x000284D4 PA_CL_VPORT_YSCALE_6
0x000284D8 PA_CL_VPORT_YOFFSET_6
0x000284DC PA_CL_VPORT_ZSCALE_6
0x000284E0 PA_CL_VPORT_ZOFFSET_6
0x000284E4 PA_CL_VPORT_XSCALE_7
0x000284E8 PA_CL_VPORT_XOFFSET_7
0x000284EC PA_CL_VPORT_YSCALE_7
0x000284F0 PA_CL_VPORT_YOFFSET_7
0x000284F4 PA_CL_VPORT_ZSCALE_7
0x000284F8 PA_CL_VPORT_ZOFFSET_7
0x000284FC PA_CL_VPORT_XSCALE_8
0x00028500 PA_CL_VPORT_XOFFSET_8
0x00028504 PA_CL_VPORT_YSCALE_8
0x00028508 PA_CL_VPORT_YOFFSET_8
0x0002850C PA_CL_VPORT_ZSCALE_8
0x00028510 PA_CL_VPORT_ZOFFSET_8
0x00028514 PA_CL_VPORT_XSCALE_9
0x00028518 PA_CL_VPORT_XOFFSET_9
0x0002851C PA_CL_VPORT_YSCALE_9
0x00028520 PA_CL_VPORT_YOFFSET_9
0x00028524 PA_CL_VPORT_ZSCALE_9
0x00028528 PA_CL_VPORT_ZOFFSET_9
0x0002852C PA_CL_VPORT_XSCALE_10
0x00028530 PA_CL_VPORT_XOFFSET_10
0x00028534 PA_CL_VPORT_YSCALE_10
0x00028538 PA_CL_VPORT_YOFFSET_10
0x0002853C PA_CL_VPORT_ZSCALE_10
0x00028540 PA_CL_VPORT_ZOFFSET_10
0x00028544 PA_CL_VPORT_XSCALE_11
0x00028548 PA_CL_VPORT_XOFFSET_11
0x0002854C PA_CL_VPORT_YSCALE_11
0x00028550 PA_CL_VPORT_YOFFSET_11
0x00028554 PA_CL_VPORT_ZSCALE_11
0x00028558 PA_CL_VPORT_ZOFFSET_11
0x0002855C PA_CL_VPORT_XSCALE_12
0x00028560 PA_CL_VPORT_XOFFSET_12
0x00028564 PA_CL_VPORT_YSCALE_12
0x00028568 PA_CL_VPORT_YOFFSET_12
0x0002856C PA_CL_VPORT_ZSCALE_12
0x00028570 PA_CL_VPORT_ZOFFSET_12
0x00028574 PA_CL_VPORT_XSCALE_13
0x00028578 PA_CL_VPORT_XOFFSET_13
0x0002857C PA_CL_VPORT_YSCALE_13
0x00028580 PA_CL_VPORT_YOFFSET_13
0x00028584 PA_CL_VPORT_ZSCALE_13
0x00028588 PA_CL_VPORT_ZOFFSET_13
0x0002858C PA_CL_VPORT_XSCALE_14
0x00028590 PA_CL_VPORT_XOFFSET_14
0x00028594 PA_CL_VPORT_YSCALE_14
0x00028598 PA_CL_VPORT_YOFFSET_14
0x0002859C PA_CL_VPORT_ZSCALE_14
0x000285A0 PA_CL_VPORT_ZOFFSET_14
0x000285A4 PA_CL_VPORT_XSCALE_15
0x000285A8 PA_CL_VPORT_XOFFSET_15
0x000285AC PA_CL_VPORT_YSCALE_15
0x000285B0 PA_CL_VPORT_YOFFSET_15
0x000285B4 PA_CL_VPORT_ZSCALE_15
0x000285B8 PA_CL_VPORT_ZOFFSET_15
0x000285BC PA_CL_UCP_0_X
0x000285C0 PA_CL_UCP_0_Y
0x000285C4 PA_CL_UCP_0_Z
0x000285C8 PA_CL_UCP_0_W
0x000285CC PA_CL_UCP_1_X
0x000285D0 PA_CL_UCP_1_Y
0x000285D4 PA_CL_UCP_1_Z
0x000285D8 PA_CL_UCP_1_W
0x000285DC PA_CL_UCP_2_X
0x000285E0 PA_CL_UCP_2_Y
0x000285E4 PA_CL_UCP_2_Z
0x000285E8 PA_CL_UCP_2_W
0x000285EC PA_CL_UCP_3_X
0x000285F0 PA_CL_UCP_3_Y
0x000285F4 PA_CL_UCP_3_Z
0x000285F8 PA_CL_UCP_3_W
0x000285FC PA_CL_UCP_4_X
0x00028600 PA_CL_UCP_4_Y
0x00028604 PA_CL_UCP_4_Z
0x00028608 PA_CL_UCP_4_W
0x0002860C PA_CL_UCP_5_X
0x00028610 PA_CL_UCP_5_Y
0x00028614 PA_CL_UCP_5_Z
0x00028618 PA_CL_UCP_5_W
0x0002861C SPI_VS_OUT_ID_0
0x00028620 SPI_VS_OUT_ID_1
0x00028624 SPI_VS_OUT_ID_2
0x00028628 SPI_VS_OUT_ID_3
0x0002862C SPI_VS_OUT_ID_4
0x00028630 SPI_VS_OUT_ID_5
0x00028634 SPI_VS_OUT_ID_6
0x00028638 SPI_VS_OUT_ID_7
0x0002863C SPI_VS_OUT_ID_8
0x00028640 SPI_VS_OUT_ID_9
0x00028644 SPI_PS_INPUT_CNTL_0
0x00028648 SPI_PS_INPUT_CNTL_1
0x0002864C SPI_PS_INPUT_CNTL_2
0x00028650 SPI_PS_INPUT_CNTL_3
0x00028654 SPI_PS_INPUT_CNTL_4
0x00028658 SPI_PS_INPUT_CNTL_5
0x0002865C SPI_PS_INPUT_CNTL_6
0x00028660 SPI_PS_INPUT_CNTL_7
0x00028664 SPI_PS_INPUT_CNTL_8
0x00028668 SPI_PS_INPUT_CNTL_9
0x0002866C SPI_PS_INPUT_CNTL_10
0x00028670 SPI_PS_INPUT_CNTL_11
0x00028674 SPI_PS_INPUT_CNTL_12
0x00028678 SPI_PS_INPUT_CNTL_13
0x0002867C SPI_PS_INPUT_CNTL_14
0x00028680 SPI_PS_INPUT_CNTL_15
0x00028684 SPI_PS_INPUT_CNTL_16
0x00028688 SPI_PS_INPUT_CNTL_17
0x0002868C SPI_PS_INPUT_CNTL_18
0x00028690 SPI_PS_INPUT_CNTL_19
0x00028694 SPI_PS_INPUT_CNTL_20
0x00028698 SPI_PS_INPUT_CNTL_21
0x0002869C SPI_PS_INPUT_CNTL_22
0x000286A0 SPI_PS_INPUT_CNTL_23
0x000286A4 SPI_PS_INPUT_CNTL_24
0x000286A8 SPI_PS_INPUT_CNTL_25
0x000286AC SPI_PS_INPUT_CNTL_26
0x000286B0 SPI_PS_INPUT_CNTL_27
0x000286B4 SPI_PS_INPUT_CNTL_28
0x000286B8 SPI_PS_INPUT_CNTL_29
0x000286BC SPI_PS_INPUT_CNTL_30
0x000286C0 SPI_PS_INPUT_CNTL_31
0x000286C4 SPI_VS_OUT_CONFIG
0x000286C8 SPI_THREAD_GROUPING
0x000286CC SPI_PS_IN_CONTROL_0
0x000286D0 SPI_PS_IN_CONTROL_1
0x000286D4 SPI_INTERP_CONTROL_0
0x000286D8 SPI_INPUT_Z
0x000286DC SPI_FOG_CNTL
0x000286E0 SPI_BARYC_CNTL
0x000286E4 SPI_PS_IN_CONTROL_2
0x000286E8 SPI_COMPUTE_INPUT_CNTL
0x000286EC SPI_COMPUTE_NUM_THREAD_X
0x000286F0 SPI_COMPUTE_NUM_THREAD_Y
0x000286F4 SPI_COMPUTE_NUM_THREAD_Z
0x00028720 GDS_ADDR_BASE
0x00028724 GDS_ADDR_SIZE
0x00028728 GDS_ORDERED_WAVE_PER_SE
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
0x0002878C CB_BLEND3_CONTROL
0x00028790 CB_BLEND4_CONTROL
0x00028794 CB_BLEND5_CONTROL
0x00028798 CB_BLEND6_CONTROL
0x0002879C CB_BLEND7_CONTROL
0x000287CC CS_COPY_STATE
0x000287D0 GFX_COPY_STATE
0x000287D4 PA_CL_POINT_X_RAD
0x000287D8 PA_CL_POINT_Y_RAD
0x000287DC PA_CL_POINT_SIZE
0x000287E0 PA_CL_POINT_CULL_RAD
0x00028808 CB_COLOR_CONTROL
0x0002880C DB_SHADER_CONTROL
0x00028810 PA_CL_CLIP_CNTL
0x00028814 PA_SU_SC_MODE_CNTL
0x00028818 PA_CL_VTE_CNTL
0x0002881C PA_CL_VS_OUT_CNTL
0x00028820 PA_CL_NANINF_CNTL
0x00028824 PA_SU_LINE_STIPPLE_CNTL
0x00028828 PA_SU_LINE_STIPPLE_SCALE
0x0002882C PA_SU_PRIM_FILTER_CNTL
0x00028838 SQ_DYN_GPR_RESOURCE_LIMIT_1
0x00028844 SQ_PGM_RESOURCES_PS
0x00028848 SQ_PGM_RESOURCES_2_PS
0x0002884C SQ_PGM_EXPORTS_PS
0x00028860 SQ_PGM_RESOURCES_VS
0x00028864 SQ_PGM_RESOURCES_2_VS
0x00028878 SQ_PGM_RESOURCES_GS
0x0002887C SQ_PGM_RESOURCES_2_GS
0x00028890 SQ_PGM_RESOURCES_ES
0x00028894 SQ_PGM_RESOURCES_2_ES
0x000288A8 SQ_PGM_RESOURCES_FS
0x000288BC SQ_PGM_RESOURCES_HS
0x000288C0 SQ_PGM_RESOURCES_2_HS
0x000288D4 SQ_PGM_RESOURCES_LS
0x000288D8 SQ_PGM_RESOURCES_2_LS
0x000288E8 SQ_LDS_ALLOC
0x000288EC SQ_LDS_ALLOC_PS
0x000288F0 SQ_VTX_SEMANTIC_CLEAR
0x00028A00 PA_SU_POINT_SIZE
0x00028A04 PA_SU_POINT_MINMAX
0x00028A08 PA_SU_LINE_CNTL
0x00028A0C PA_SC_LINE_STIPPLE
0x00028A10 VGT_OUTPUT_PATH_CNTL
0x00028A14 VGT_HOS_CNTL
0x00028A18 VGT_HOS_MAX_TESS_LEVEL
0x00028A1C VGT_HOS_MIN_TESS_LEVEL
0x00028A20 VGT_HOS_REUSE_DEPTH
0x00028A24 VGT_GROUP_PRIM_TYPE
0x00028A28 VGT_GROUP_FIRST_DECR
0x00028A2C VGT_GROUP_DECR
0x00028A30 VGT_GROUP_VECT_0_CNTL
0x00028A34 VGT_GROUP_VECT_1_CNTL
0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A48 PA_SC_MODE_CNTL_0
0x00028A4C PA_SC_MODE_CNTL_1
0x00028A50 VGT_ENHANCE
0x00028A54 VGT_GS_PER_ES
0x00028A58 VGT_ES_PER_GS
0x00028A5C VGT_GS_PER_VS
0x00028A6C VGT_GS_OUT_PRIM_TYPE
0x00028A84 VGT_PRIMITIVEID_EN
0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
0x00028AA0 VGT_INSTANCE_STEP_RATE_0
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
0x00028AC0 DB_SRESULTS_COMPARE_STATE0
0x00028AC4 DB_SRESULTS_COMPARE_STATE1
0x00028AC8 DB_PRELOAD_CONTROL
0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028B38 VGT_GS_MAX_VERT_OUT
0x00028B54 VGT_SHADER_STAGES_EN
0x00028B58 VGT_LS_HS_CONFIG
0x00028B5C VGT_LS_SIZE
0x00028B60 VGT_HS_SIZE
0x00028B64 VGT_LS_HS_ALLOC
0x00028B68 VGT_HS_PATCH_CONST
0x00028B6C VGT_TF_PARAM
0x00028B70 DB_ALPHA_TO_MASK
0x00028B74 VGT_DISPATCH_INITIATOR
0x00028B78 PA_SU_POLY_OFFSET_DB_FMT_CNTL
0x00028B7C PA_SU_POLY_OFFSET_CLAMP
0x00028B80 PA_SU_POLY_OFFSET_FRONT_SCALE
0x00028B84 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028B88 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028B8C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028B90 VGT_GS_INSTANCE_CNT
0x00028C00 PA_SC_LINE_CNTL
0x00028C08 PA_SU_VTX_CNTL
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
0x00028C10 PA_CL_GB_VERT_DISC_ADJ
0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
0x00028C1C PA_SC_AA_SAMPLE_LOCS_0
0x00028C20 PA_SC_AA_SAMPLE_LOCS_1
0x00028C24 PA_SC_AA_SAMPLE_LOCS_2
0x00028C28 PA_SC_AA_SAMPLE_LOCS_3
0x00028C2C PA_SC_AA_SAMPLE_LOCS_4
0x00028C30 PA_SC_AA_SAMPLE_LOCS_5
0x00028C34 PA_SC_AA_SAMPLE_LOCS_6
0x00028C38 PA_SC_AA_SAMPLE_LOCS_7
0x00028C3C PA_SC_AA_MASK
0x00028C78 CB_COLOR0_DIM
0x00028CB4 CB_COLOR1_DIM
0x00028CF0 CB_COLOR2_DIM
0x00028D2C CB_COLOR3_DIM
0x00028D68 CB_COLOR4_DIM
0x00028DA4 CB_COLOR5_DIM
0x00028DE0 CB_COLOR6_DIM
0x00028E1C CB_COLOR7_DIM
0x00028E58 CB_COLOR8_DIM
0x00028E74 CB_COLOR9_DIM
0x00028E90 CB_COLOR10_DIM
0x00028EAC CB_COLOR11_DIM
0x00028C8C CB_COLOR0_CLEAR_WORD0
0x00028C90 CB_COLOR0_CLEAR_WORD1
0x00028C94 CB_COLOR0_CLEAR_WORD2
0x00028C98 CB_COLOR0_CLEAR_WORD3
0x00028CC8 CB_COLOR1_CLEAR_WORD0
0x00028CCC CB_COLOR1_CLEAR_WORD1
0x00028CD0 CB_COLOR1_CLEAR_WORD2
0x00028CD4 CB_COLOR1_CLEAR_WORD3
0x00028D04 CB_COLOR2_CLEAR_WORD0
0x00028D08 CB_COLOR2_CLEAR_WORD1
0x00028D0C CB_COLOR2_CLEAR_WORD2
0x00028D10 CB_COLOR2_CLEAR_WORD3
0x00028D40 CB_COLOR3_CLEAR_WORD0
0x00028D44 CB_COLOR3_CLEAR_WORD1
0x00028D48 CB_COLOR3_CLEAR_WORD2
0x00028D4C CB_COLOR3_CLEAR_WORD3
0x00028D7C CB_COLOR4_CLEAR_WORD0
0x00028D80 CB_COLOR4_CLEAR_WORD1
0x00028D84 CB_COLOR4_CLEAR_WORD2
0x00028D88 CB_COLOR4_CLEAR_WORD3
0x00028DB8 CB_COLOR5_CLEAR_WORD0
0x00028DBC CB_COLOR5_CLEAR_WORD1
0x00028DC0 CB_COLOR5_CLEAR_WORD2
0x00028DC4 CB_COLOR5_CLEAR_WORD3
0x00028DF4 CB_COLOR6_CLEAR_WORD0
0x00028DF8 CB_COLOR6_CLEAR_WORD1
0x00028DFC CB_COLOR6_CLEAR_WORD2
0x00028E00 CB_COLOR6_CLEAR_WORD3
0x00028E30 CB_COLOR7_CLEAR_WORD0
0x00028E34 CB_COLOR7_CLEAR_WORD1
0x00028E38 CB_COLOR7_CLEAR_WORD2
0x00028E3C CB_COLOR7_CLEAR_WORD3
0x00028F80 SQ_ALU_CONST_BUFFER_SIZE_HS_0
0x00028F84 SQ_ALU_CONST_BUFFER_SIZE_HS_1
0x00028F88 SQ_ALU_CONST_BUFFER_SIZE_HS_2
0x00028F8C SQ_ALU_CONST_BUFFER_SIZE_HS_3
0x00028F90 SQ_ALU_CONST_BUFFER_SIZE_HS_4
0x00028F94 SQ_ALU_CONST_BUFFER_SIZE_HS_5
0x00028F98 SQ_ALU_CONST_BUFFER_SIZE_HS_6
0x00028F9C SQ_ALU_CONST_BUFFER_SIZE_HS_7
0x00028FA0 SQ_ALU_CONST_BUFFER_SIZE_HS_8
0x00028FA4 SQ_ALU_CONST_BUFFER_SIZE_HS_9
0x00028FA8 SQ_ALU_CONST_BUFFER_SIZE_HS_10
0x00028FAC SQ_ALU_CONST_BUFFER_SIZE_HS_11
0x00028FB0 SQ_ALU_CONST_BUFFER_SIZE_HS_12
0x00028FB4 SQ_ALU_CONST_BUFFER_SIZE_HS_13
0x00028FB8 SQ_ALU_CONST_BUFFER_SIZE_HS_14
0x00028FBC SQ_ALU_CONST_BUFFER_SIZE_HS_15
0x00028FC0 SQ_ALU_CONST_BUFFER_SIZE_LS_0
0x00028FC4 SQ_ALU_CONST_BUFFER_SIZE_LS_1
0x00028FC8 SQ_ALU_CONST_BUFFER_SIZE_LS_2
0x00028FCC SQ_ALU_CONST_BUFFER_SIZE_LS_3
0x00028FD0 SQ_ALU_CONST_BUFFER_SIZE_LS_4
0x00028FD4 SQ_ALU_CONST_BUFFER_SIZE_LS_5
0x00028FD8 SQ_ALU_CONST_BUFFER_SIZE_LS_6
0x00028FDC SQ_ALU_CONST_BUFFER_SIZE_LS_7
0x00028FE0 SQ_ALU_CONST_BUFFER_SIZE_LS_8
0x00028FE4 SQ_ALU_CONST_BUFFER_SIZE_LS_9
0x00028FE8 SQ_ALU_CONST_BUFFER_SIZE_LS_10
0x00028FEC SQ_ALU_CONST_BUFFER_SIZE_LS_11
0x00028FF0 SQ_ALU_CONST_BUFFER_SIZE_LS_12
0x00028FF4 SQ_ALU_CONST_BUFFER_SIZE_LS_13
0x00028FF8 SQ_ALU_CONST_BUFFER_SIZE_LS_14
0x00028FFC SQ_ALU_CONST_BUFFER_SIZE_LS_15
0x0003CFF0 SQ_VTX_BASE_VTX_LOC
0x0003CFF4 SQ_VTX_START_INST_LOC
0x0003FF00 SQ_TEX_SAMPLER_CLEAR
0x0003FF04 SQ_TEX_RESOURCE_CLEAR
0x0003FF08 SQ_LOOP_BOOL_CLEAR
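
These lists follow the same plain-text format as the Linux kernel's reg_srcs files: a header line giving the chip family and the highest register offset covered, then one "offset name" pair per register that userspace command streams are allowed to write. In the upstream kernel a small build-time generator (mkregtable) turns each list into a per-chip bitmap for the command-stream checker. The sketch below is a hypothetical, minimal parser in that spirit, not the driver's actual tool; the default file name and the output format are illustrative only.

/* Minimal sketch of a reg_srcs-style parser (illustrative, not the
 * driver's mkregtable): read "chip max_offset", then "0xOFFSET NAME"
 * lines, and set one bit per safe DWORD-aligned register. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	FILE *f = fopen(argc > 1 ? argv[1] : "evergreen", "r");
	char chip[64], line[256], name[128];
	unsigned max_offset, offset, nwords, *bitmap;

	if (!f || fscanf(f, "%63s %x ", chip, &max_offset) != 2)
		return 1;
	nwords = (max_offset / 4 + 31) / 32;	/* one bit per register */
	bitmap = calloc(nwords, sizeof(*bitmap));
	if (!bitmap)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (sscanf(line, "%x %127s", &offset, name) == 2 &&
		    offset <= max_offset)
			bitmap[(offset / 4) / 32] |= 1u << ((offset / 4) % 32);
	printf("%s: %u bitmap words for offsets 0x0-0x%X\n",
	       chip, nwords, max_offset);
	free(bitmap);
	fclose(f);
	return 0;
}
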
/drivers/video/drm/radeon/reg_srcs/r100
0,0 → 1,105
r100 0x3294
0x1434 SRC_Y_X
0x1438 DST_Y_X
0x143C DST_HEIGHT_WIDTH
0x146C DP_GUI_MASTER_CNTL
0x1474 BRUSH_Y_X
0x1478 DP_BRUSH_BKGD_CLR
0x147C DP_BRUSH_FRGD_CLR
0x1480 BRUSH_DATA0
0x1484 BRUSH_DATA1
0x1598 DST_WIDTH_HEIGHT
0x15C0 CLR_CMP_CNTL
0x15C4 CLR_CMP_CLR_SRC
0x15C8 CLR_CMP_CLR_DST
0x15CC CLR_CMP_MSK
0x15D8 DP_SRC_FRGD_CLR
0x15DC DP_SRC_BKGD_CLR
0x1600 DST_LINE_START
0x1604 DST_LINE_END
0x1608 DST_LINE_PATCOUNT
0x16C0 DP_CNTL
0x16CC DP_WRITE_MSK
0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
0x16E8 DEFAULT_SC_BOTTOM_RIGHT
0x16EC SC_TOP_LEFT
0x16F0 SC_BOTTOM_RIGHT
0x16F4 SRC_SC_BOTTOM_RIGHT
0x1714 DSTCACHE_CTLSTAT
0x1720 WAIT_UNTIL
0x172C RBBM_GUICNTL
0x1810 FOG_3D_TABLE_START
0x1814 FOG_3D_TABLE_END
0x1a14 FOG_TABLE_INDEX
0x1a18 FOG_TABLE_DATA
0x1c14 PP_MISC
0x1c18 PP_FOG_COLOR
0x1c1c RE_SOLID_COLOR
0x1c20 RB3D_BLENDCNTL
0x1c4c SE_CNTL
0x1c50 SE_COORD_FMT
0x1c60 PP_TXCBLEND_0
0x1c64 PP_TXABLEND_0
0x1c68 PP_TFACTOR_0
0x1c78 PP_TXCBLEND_1
0x1c7c PP_TXABLEND_1
0x1c80 PP_TFACTOR_1
0x1c90 PP_TXCBLEND_2
0x1c94 PP_TXABLEND_2
0x1c98 PP_TFACTOR_2
0x1cc8 RE_STIPPLE_ADDR
0x1ccc RE_STIPPLE_DATA
0x1cd0 RE_LINE_PATTERN
0x1cd4 RE_LINE_STATE
0x1d40 PP_BORDER_COLOR0
0x1d44 PP_BORDER_COLOR1
0x1d48 PP_BORDER_COLOR2
0x1d7c RB3D_STENCILREFMASK
0x1d80 RB3D_ROPCNTL
0x1d84 RB3D_PLANEMASK
0x1d98 VAP_VPORT_XSCALE
0x1d9C VAP_VPORT_XOFFSET
0x1da0 VAP_VPORT_YSCALE
0x1da4 VAP_VPORT_YOFFSET
0x1da8 VAP_VPORT_ZSCALE
0x1dac VAP_VPORT_ZOFFSET
0x1db0 SE_ZBIAS_FACTOR
0x1db4 SE_ZBIAS_CONSTANT
0x1db8 SE_LINE_WIDTH
0x2140 SE_CNTL_STATUS
0x2200 SE_TCL_VECTOR_INDX_REG
0x2204 SE_TCL_VECTOR_DATA_REG
0x2208 SE_TCL_SCALAR_INDX_REG
0x220c SE_TCL_SCALAR_DATA_REG
0x2210 SE_TCL_MATERIAL_EMISSIVE_RED
0x2214 SE_TCL_MATERIAL_EMISSIVE_GREEN
0x2218 SE_TCL_MATERIAL_EMISSIVE_BLUE
0x221c SE_TCL_MATERIAL_EMISSIVE_ALPHA
0x2220 SE_TCL_MATERIAL_AMBIENT_RED
0x2224 SE_TCL_MATERIAL_AMBIENT_GREEN
0x2228 SE_TCL_MATERIAL_AMBIENT_BLUE
0x222c SE_TCL_MATERIAL_AMBIENT_ALPHA
0x2230 SE_TCL_MATERIAL_DIFFUSE_RED
0x2234 SE_TCL_MATERIAL_DIFFUSE_GREEN
0x2238 SE_TCL_MATERIAL_DIFFUSE_BLUE
0x223c SE_TCL_MATERIAL_DIFFUSE_ALPHA
0x2240 SE_TCL_MATERIAL_SPECULAR_RED
0x2244 SE_TCL_MATERIAL_SPECULAR_GREEN
0x2248 SE_TCL_MATERIAL_SPECULAR_BLUE
0x224c SE_TCL_MATERIAL_SPECULAR_ALPHA
0x2250 SE_TCL_SHININESS
0x2254 SE_TCL_OUTPUT_VTX_FMT
0x2258 SE_TCL_OUTPUT_VTX_SEL
0x225c SE_TCL_MATRIX_SELECT_0
0x2260 SE_TCL_MATRIX_SELECT_1
0x2264 SE_TCL_UCP_VERT_BLEND_CNTL
0x2268 SE_TCL_TEXTURE_PROC_CTL
0x226c SE_TCL_LIGHT_MODEL_CTL
0x2270 SE_TCL_PER_LIGHT_CTL_0
0x2274 SE_TCL_PER_LIGHT_CTL_1
0x2278 SE_TCL_PER_LIGHT_CTL_2
0x227c SE_TCL_PER_LIGHT_CTL_3
0x2284 SE_TCL_STATE_FLUSH
0x26c0 RE_TOP_LEFT
0x26c4 RE_MISC
0x3290 RB3D_ZPASS_DATA
/drivers/video/drm/radeon/reg_srcs/r200
0,0 → 1,186
r200 0x3294
0x1434 SRC_Y_X
0x1438 DST_Y_X
0x143C DST_HEIGHT_WIDTH
0x146C DP_GUI_MASTER_CNTL
0x1474 BRUSH_Y_X
0x1478 DP_BRUSH_BKGD_CLR
0x147C DP_BRUSH_FRGD_CLR
0x1480 BRUSH_DATA0
0x1484 BRUSH_DATA1
0x1598 DST_WIDTH_HEIGHT
0x15C0 CLR_CMP_CNTL
0x15C4 CLR_CMP_CLR_SRC
0x15C8 CLR_CMP_CLR_DST
0x15CC CLR_CMP_MSK
0x15D8 DP_SRC_FRGD_CLR
0x15DC DP_SRC_BKGD_CLR
0x1600 DST_LINE_START
0x1604 DST_LINE_END
0x1608 DST_LINE_PATCOUNT
0x16C0 DP_CNTL
0x16CC DP_WRITE_MSK
0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
0x16E8 DEFAULT_SC_BOTTOM_RIGHT
0x16EC SC_TOP_LEFT
0x16F0 SC_BOTTOM_RIGHT
0x16F4 SRC_SC_BOTTOM_RIGHT
0x1714 DSTCACHE_CTLSTAT
0x1720 WAIT_UNTIL
0x172C RBBM_GUICNTL
0x1c14 PP_MISC
0x1c18 PP_FOG_COLOR
0x1c1c RE_SOLID_COLOR
0x1c20 RB3D_BLENDCNTL
0x1c4c SE_CNTL
0x1c50 RE_CNTL
0x1cc8 RE_STIPPLE_ADDR
0x1ccc RE_STIPPLE_DATA
0x1cd0 RE_LINE_PATTERN
0x1cd4 RE_LINE_STATE
0x1cd8 RE_SCISSOR_TL_0
0x1cdc RE_SCISSOR_BR_0
0x1ce0 RE_SCISSOR_TL_1
0x1ce4 RE_SCISSOR_BR_1
0x1ce8 RE_SCISSOR_TL_2
0x1cec RE_SCISSOR_BR_2
0x1d60 RB3D_DEPTHXY_OFFSET
0x1d7c RB3D_STENCILREFMASK
0x1d80 RB3D_ROPCNTL
0x1d84 RB3D_PLANEMASK
0x1d98 VAP_VPORT_XSCALE
0x1d9c VAP_VPORT_XOFFSET
0x1da0 VAP_VPORT_YSCALE
0x1da4 VAP_VPORT_YOFFSET
0x1da8 VAP_VPORT_ZSCALE
0x1dac VAP_VPORT_ZOFFSET
0x1db0 SE_ZBIAS_FACTOR
0x1db4 SE_ZBIAS_CONSTANT
0x1db8 SE_LINE_WIDTH
0x2080 SE_VAP_CNTL
0x2090 SE_TCL_OUTPUT_VTX_FMT_0
0x2094 SE_TCL_OUTPUT_VTX_FMT_1
0x20b0 SE_VTE_CNTL
0x2140 SE_CNTL_STATUS
0x2180 SE_VTX_STATE_CNTL
0x2200 SE_TCL_VECTOR_INDX_REG
0x2204 SE_TCL_VECTOR_DATA_REG
0x2208 SE_TCL_SCALAR_INDX_REG
0x220c SE_TCL_SCALAR_DATA_REG
0x2230 SE_TCL_MATRIX_SEL_0
0x2234 SE_TCL_MATRIX_SEL_1
0x2238 SE_TCL_MATRIX_SEL_2
0x223c SE_TCL_MATRIX_SEL_3
0x2240 SE_TCL_MATRIX_SEL_4
0x2250 SE_TCL_OUTPUT_VTX_COMP_SEL
0x2254 SE_TCL_INPUT_VTX_VECTOR_ADDR_0
0x2258 SE_TCL_INPUT_VTX_VECTOR_ADDR_1
0x225c SE_TCL_INPUT_VTX_VECTOR_ADDR_2
0x2260 SE_TCL_INPUT_VTX_VECTOR_ADDR_3
0x2268 SE_TCL_LIGHT_MODEL_CTL_0
0x226c SE_TCL_LIGHT_MODEL_CTL_1
0x2270 SE_TCL_PER_LIGHT_CTL_0
0x2274 SE_TCL_PER_LIGHT_CTL_1
0x2278 SE_TCL_PER_LIGHT_CTL_2
0x227c SE_TCL_PER_LIGHT_CTL_3
0x2284 VAP_PVS_STATE_FLUSH_REG
0x22a8 SE_TCL_TEX_PROC_CTL_2
0x22ac SE_TCL_TEX_PROC_CTL_3
0x22b0 SE_TCL_TEX_PROC_CTL_0
0x22b4 SE_TCL_TEX_PROC_CTL_1
0x22b8 SE_TCL_TEX_CYL_WRAP_CTL
0x22c0 SE_TCL_UCP_VERT_BLEND_CNTL
0x22c4 SE_TCL_POINT_SPRITE_CNTL
0x22d0 SE_PVS_CNTL
0x22d4 SE_PVS_CONST_CNTL
0x2648 RE_POINTSIZE
0x26c0 RE_TOP_LEFT
0x26c4 RE_MISC
0x26f0 RE_AUX_SCISSOR_CNTL
0x2c14 PP_BORDER_COLOR_0
0x2c34 PP_BORDER_COLOR_1
0x2c54 PP_BORDER_COLOR_2
0x2c74 PP_BORDER_COLOR_3
0x2c94 PP_BORDER_COLOR_4
0x2cb4 PP_BORDER_COLOR_5
0x2cc4 PP_CNTL_X
0x2cf8 PP_TRI_PERF
0x2cfc PP_PERF_CNTL
0x2d9c PP_TAM_DEBUG3
0x2ee0 PP_TFACTOR_0
0x2ee4 PP_TFACTOR_1
0x2ee8 PP_TFACTOR_2
0x2eec PP_TFACTOR_3
0x2ef0 PP_TFACTOR_4
0x2ef4 PP_TFACTOR_5
0x2ef8 PP_TFACTOR_6
0x2efc PP_TFACTOR_7
0x2f00 PP_TXCBLEND_0
0x2f04 PP_TXCBLEND2_0
0x2f08 PP_TXABLEND_0
0x2f0c PP_TXABLEND2_0
0x2f10 PP_TXCBLEND_1
0x2f14 PP_TXCBLEND2_1
0x2f18 PP_TXABLEND_1
0x2f1c PP_TXABLEND2_1
0x2f20 PP_TXCBLEND_2
0x2f24 PP_TXCBLEND2_2
0x2f28 PP_TXABLEND_2
0x2f2c PP_TXABLEND2_2
0x2f30 PP_TXCBLEND_3
0x2f34 PP_TXCBLEND2_3
0x2f38 PP_TXABLEND_3
0x2f3c PP_TXABLEND2_3
0x2f40 PP_TXCBLEND_4
0x2f44 PP_TXCBLEND2_4
0x2f48 PP_TXABLEND_4
0x2f4c PP_TXABLEND2_4
0x2f50 PP_TXCBLEND_5
0x2f54 PP_TXCBLEND2_5
0x2f58 PP_TXABLEND_5
0x2f5c PP_TXABLEND2_5
0x2f60 PP_TXCBLEND_6
0x2f64 PP_TXCBLEND2_6
0x2f68 PP_TXABLEND_6
0x2f6c PP_TXABLEND2_6
0x2f70 PP_TXCBLEND_7
0x2f74 PP_TXCBLEND2_7
0x2f78 PP_TXABLEND_7
0x2f7c PP_TXABLEND2_7
0x2f80 PP_TXCBLEND_8
0x2f84 PP_TXCBLEND2_8
0x2f88 PP_TXABLEND_8
0x2f8c PP_TXABLEND2_8
0x2f90 PP_TXCBLEND_9
0x2f94 PP_TXCBLEND2_9
0x2f98 PP_TXABLEND_9
0x2f9c PP_TXABLEND2_9
0x2fa0 PP_TXCBLEND_10
0x2fa4 PP_TXCBLEND2_10
0x2fa8 PP_TXABLEND_10
0x2fac PP_TXABLEND2_10
0x2fb0 PP_TXCBLEND_11
0x2fb4 PP_TXCBLEND2_11
0x2fb8 PP_TXABLEND_11
0x2fbc PP_TXABLEND2_11
0x2fc0 PP_TXCBLEND_12
0x2fc4 PP_TXCBLEND2_12
0x2fc8 PP_TXABLEND_12
0x2fcc PP_TXABLEND2_12
0x2fd0 PP_TXCBLEND_13
0x2fd4 PP_TXCBLEND2_13
0x2fd8 PP_TXABLEND_13
0x2fdc PP_TXABLEND2_13
0x2fe0 PP_TXCBLEND_14
0x2fe4 PP_TXCBLEND2_14
0x2fe8 PP_TXABLEND_14
0x2fec PP_TXABLEND2_14
0x2ff0 PP_TXCBLEND_15
0x2ff4 PP_TXCBLEND2_15
0x2ff8 PP_TXABLEND_15
0x2ffc PP_TXABLEND2_15
0x3218 RB3D_BLENDCOLOR
0x321c RB3D_ABLENDCNTL
0x3220 RB3D_CBLENDCNTL
0x3290 RB3D_ZPASS_DATA
 
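Once generated, such a bitmap makes validating a single register write a constant-time test. The helper below is a sketch under the assumption of the bitmap layout from the parser above; the function and parameter names are invented for illustration and are not the driver's API.

/* Sketch: constant-time safe-register test against a generated bitmap.
 * 'safe_regs'/'nwords' are assumed to match the layout built above;
 * all names here are illustrative. */
#include <stdbool.h>

static bool reg_is_safe(const unsigned *safe_regs, unsigned nwords,
			unsigned reg_offset)
{
	unsigned idx = reg_offset / 4;	/* DWORD register index */

	if (idx / 32 >= nwords)
		return false;		/* outside the table: reject */
	return (safe_regs[idx / 32] >> (idx % 32)) & 1;
}
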
/drivers/video/drm/radeon/reg_srcs/r300
0,0 → 1,714
r300 0x4f60
0x1434 SRC_Y_X
0x1438 DST_Y_X
0x143C DST_HEIGHT_WIDTH
0x146C DP_GUI_MASTER_CNTL
0x1474 BRUSH_Y_X
0x1478 DP_BRUSH_BKGD_CLR
0x147C DP_BRUSH_FRGD_CLR
0x1480 BRUSH_DATA0
0x1484 BRUSH_DATA1
0x1598 DST_WIDTH_HEIGHT
0x15C0 CLR_CMP_CNTL
0x15C4 CLR_CMP_CLR_SRC
0x15C8 CLR_CMP_CLR_DST
0x15CC CLR_CMP_MSK
0x15D8 DP_SRC_FRGD_CLR
0x15DC DP_SRC_BKGD_CLR
0x1600 DST_LINE_START
0x1604 DST_LINE_END
0x1608 DST_LINE_PATCOUNT
0x16C0 DP_CNTL
0x16CC DP_WRITE_MSK
0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
0x16E8 DEFAULT_SC_BOTTOM_RIGHT
0x16EC SC_TOP_LEFT
0x16F0 SC_BOTTOM_RIGHT
0x16F4 SRC_SC_BOTTOM_RIGHT
0x1714 DSTCACHE_CTLSTAT
0x1720 WAIT_UNTIL
0x172C RBBM_GUICNTL
0x1D98 VAP_VPORT_XSCALE
0x1D9C VAP_VPORT_XOFFSET
0x1DA0 VAP_VPORT_YSCALE
0x1DA4 VAP_VPORT_YOFFSET
0x1DA8 VAP_VPORT_ZSCALE
0x1DAC VAP_VPORT_ZOFFSET
0x2080 VAP_CNTL
0x2090 VAP_OUT_VTX_FMT_0
0x2094 VAP_OUT_VTX_FMT_1
0x20B0 VAP_VTE_CNTL
0x2138 VAP_VF_MIN_VTX_INDX
0x2140 VAP_CNTL_STATUS
0x2150 VAP_PROG_STREAM_CNTL_0
0x2154 VAP_PROG_STREAM_CNTL_1
0x2158 VAP_PROG_STREAM_CNTL_2
0x215C VAP_PROG_STREAM_CNTL_3
0x2160 VAP_PROG_STREAM_CNTL_4
0x2164 VAP_PROG_STREAM_CNTL_5
0x2168 VAP_PROG_STREAM_CNTL_6
0x216C VAP_PROG_STREAM_CNTL_7
0x2180 VAP_VTX_STATE_CNTL
0x2184 VAP_VSM_VTX_ASSM
0x2188 VAP_VTX_STATE_IND_REG_0
0x218C VAP_VTX_STATE_IND_REG_1
0x2190 VAP_VTX_STATE_IND_REG_2
0x2194 VAP_VTX_STATE_IND_REG_3
0x2198 VAP_VTX_STATE_IND_REG_4
0x219C VAP_VTX_STATE_IND_REG_5
0x21A0 VAP_VTX_STATE_IND_REG_6
0x21A4 VAP_VTX_STATE_IND_REG_7
0x21A8 VAP_VTX_STATE_IND_REG_8
0x21AC VAP_VTX_STATE_IND_REG_9
0x21B0 VAP_VTX_STATE_IND_REG_10
0x21B4 VAP_VTX_STATE_IND_REG_11
0x21B8 VAP_VTX_STATE_IND_REG_12
0x21BC VAP_VTX_STATE_IND_REG_13
0x21C0 VAP_VTX_STATE_IND_REG_14
0x21C4 VAP_VTX_STATE_IND_REG_15
0x21DC VAP_PSC_SGN_NORM_CNTL
0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
0x21EC VAP_PROG_STREAM_CNTL_EXT_3
0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
0x21FC VAP_PROG_STREAM_CNTL_EXT_7
0x2200 VAP_PVS_VECTOR_INDX_REG
0x2204 VAP_PVS_VECTOR_DATA_REG
0x2208 VAP_PVS_VECTOR_DATA_REG_128
0x221C VAP_CLIP_CNTL
0x2220 VAP_GB_VERT_CLIP_ADJ
0x2224 VAP_GB_VERT_DISC_ADJ
0x2228 VAP_GB_HORZ_CLIP_ADJ
0x222C VAP_GB_HORZ_DISC_ADJ
0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
0x2284 VAP_PVS_STATE_FLUSH_REG
0x2288 VAP_PVS_VTX_TIMEOUT_REG
0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
0x22D0 VAP_PVS_CODE_CNTL_0
0x22D4 VAP_PVS_CONST_CNTL
0x22D8 VAP_PVS_CODE_CNTL_1
0x22DC VAP_PVS_FLOW_CNTL_OPC
0x342C RB2D_DSTCACHE_CTLSTAT
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
0x4010 GB_MSPOS0
0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
0x4100 TX_INVALTAGS
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
0x4208 GA_POINT_S1
0x420C GA_POINT_T1
0x4214 GA_TRIANGLE_STIPPLE
0x421C GA_POINT_SIZE
0x4230 GA_POINT_MINMAX
0x4234 GA_LINE_CNTL
0x4238 GA_LINE_STIPPLE_CONFIG
0x4260 GA_LINE_STIPPLE_VALUE
0x4264 GA_LINE_S0
0x4268 GA_LINE_S1
0x4278 GA_COLOR_CONTROL
0x427C GA_SOLID_RG
0x4280 GA_SOLID_BA
0x4288 GA_POLY_MODE
0x428C GA_ROUND_MODE
0x4290 GA_OFFSET
0x4294 GA_FOG_SCALE
0x4298 GA_FOG_OFFSET
0x42A0 SU_TEX_WRAP
0x42A4 SU_POLY_OFFSET_FRONT_SCALE
0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
0x42AC SU_POLY_OFFSET_BACK_SCALE
0x42B0 SU_POLY_OFFSET_BACK_OFFSET
0x42B4 SU_POLY_OFFSET_ENABLE
0x42B8 SU_CULL_MODE
0x42C0 SU_DEPTH_SCALE
0x42C4 SU_DEPTH_OFFSET
0x42C8 SU_REG_DEST
0x4300 RS_COUNT
0x4304 RS_INST_COUNT
0x4310 RS_IP_0
0x4314 RS_IP_1
0x4318 RS_IP_2
0x431C RS_IP_3
0x4320 RS_IP_4
0x4324 RS_IP_5
0x4328 RS_IP_6
0x432C RS_IP_7
0x4330 RS_INST_0
0x4334 RS_INST_1
0x4338 RS_INST_2
0x433C RS_INST_3
0x4340 RS_INST_4
0x4344 RS_INST_5
0x4348 RS_INST_6
0x434C RS_INST_7
0x4350 RS_INST_8
0x4354 RS_INST_9
0x4358 RS_INST_10
0x435C RS_INST_11
0x4360 RS_INST_12
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
0x43B8 SC_CLIP_1_A
0x43BC SC_CLIP_1_B
0x43C0 SC_CLIP_2_A
0x43C4 SC_CLIP_2_B
0x43C8 SC_CLIP_3_A
0x43CC SC_CLIP_3_B
0x43D0 SC_CLIP_RULE
0x43E0 SC_SCISSOR0
0x43E8 SC_SCREENDOOR
0x4440 TX_FILTER1_0
0x4444 TX_FILTER1_1
0x4448 TX_FILTER1_2
0x444C TX_FILTER1_3
0x4450 TX_FILTER1_4
0x4454 TX_FILTER1_5
0x4458 TX_FILTER1_6
0x445C TX_FILTER1_7
0x4460 TX_FILTER1_8
0x4464 TX_FILTER1_9
0x4468 TX_FILTER1_10
0x446C TX_FILTER1_11
0x4470 TX_FILTER1_12
0x4474 TX_FILTER1_13
0x4478 TX_FILTER1_14
0x447C TX_FILTER1_15
0x4580 TX_CHROMA_KEY_0
0x4584 TX_CHROMA_KEY_1
0x4588 TX_CHROMA_KEY_2
0x458C TX_CHROMA_KEY_3
0x4590 TX_CHROMA_KEY_4
0x4594 TX_CHROMA_KEY_5
0x4598 TX_CHROMA_KEY_6
0x459C TX_CHROMA_KEY_7
0x45A0 TX_CHROMA_KEY_8
0x45A4 TX_CHROMA_KEY_9
0x45A8 TX_CHROMA_KEY_10
0x45AC TX_CHROMA_KEY_11
0x45B0 TX_CHROMA_KEY_12
0x45B4 TX_CHROMA_KEY_13
0x45B8 TX_CHROMA_KEY_14
0x45BC TX_CHROMA_KEY_15
0x45C0 TX_BORDER_COLOR_0
0x45C4 TX_BORDER_COLOR_1
0x45C8 TX_BORDER_COLOR_2
0x45CC TX_BORDER_COLOR_3
0x45D0 TX_BORDER_COLOR_4
0x45D4 TX_BORDER_COLOR_5
0x45D8 TX_BORDER_COLOR_6
0x45DC TX_BORDER_COLOR_7
0x45E0 TX_BORDER_COLOR_8
0x45E4 TX_BORDER_COLOR_9
0x45E8 TX_BORDER_COLOR_10
0x45EC TX_BORDER_COLOR_11
0x45F0 TX_BORDER_COLOR_12
0x45F4 TX_BORDER_COLOR_13
0x45F8 TX_BORDER_COLOR_14
0x45FC TX_BORDER_COLOR_15
0x4600 US_CONFIG
0x4604 US_PIXSIZE
0x4608 US_CODE_OFFSET
0x460C US_RESET
0x4610 US_CODE_ADDR_0
0x4614 US_CODE_ADDR_1
0x4618 US_CODE_ADDR_2
0x461C US_CODE_ADDR_3
0x4620 US_TEX_INST_0
0x4624 US_TEX_INST_1
0x4628 US_TEX_INST_2
0x462C US_TEX_INST_3
0x4630 US_TEX_INST_4
0x4634 US_TEX_INST_5
0x4638 US_TEX_INST_6
0x463C US_TEX_INST_7
0x4640 US_TEX_INST_8
0x4644 US_TEX_INST_9
0x4648 US_TEX_INST_10
0x464C US_TEX_INST_11
0x4650 US_TEX_INST_12
0x4654 US_TEX_INST_13
0x4658 US_TEX_INST_14
0x465C US_TEX_INST_15
0x4660 US_TEX_INST_16
0x4664 US_TEX_INST_17
0x4668 US_TEX_INST_18
0x466C US_TEX_INST_19
0x4670 US_TEX_INST_20
0x4674 US_TEX_INST_21
0x4678 US_TEX_INST_22
0x467C US_TEX_INST_23
0x4680 US_TEX_INST_24
0x4684 US_TEX_INST_25
0x4688 US_TEX_INST_26
0x468C US_TEX_INST_27
0x4690 US_TEX_INST_28
0x4694 US_TEX_INST_29
0x4698 US_TEX_INST_30
0x469C US_TEX_INST_31
0x46A4 US_OUT_FMT_0
0x46A8 US_OUT_FMT_1
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
0x46C0 US_ALU_RGB_ADDR_0
0x46C4 US_ALU_RGB_ADDR_1
0x46C8 US_ALU_RGB_ADDR_2
0x46CC US_ALU_RGB_ADDR_3
0x46D0 US_ALU_RGB_ADDR_4
0x46D4 US_ALU_RGB_ADDR_5
0x46D8 US_ALU_RGB_ADDR_6
0x46DC US_ALU_RGB_ADDR_7
0x46E0 US_ALU_RGB_ADDR_8
0x46E4 US_ALU_RGB_ADDR_9
0x46E8 US_ALU_RGB_ADDR_10
0x46EC US_ALU_RGB_ADDR_11
0x46F0 US_ALU_RGB_ADDR_12
0x46F4 US_ALU_RGB_ADDR_13
0x46F8 US_ALU_RGB_ADDR_14
0x46FC US_ALU_RGB_ADDR_15
0x4700 US_ALU_RGB_ADDR_16
0x4704 US_ALU_RGB_ADDR_17
0x4708 US_ALU_RGB_ADDR_18
0x470C US_ALU_RGB_ADDR_19
0x4710 US_ALU_RGB_ADDR_20
0x4714 US_ALU_RGB_ADDR_21
0x4718 US_ALU_RGB_ADDR_22
0x471C US_ALU_RGB_ADDR_23
0x4720 US_ALU_RGB_ADDR_24
0x4724 US_ALU_RGB_ADDR_25
0x4728 US_ALU_RGB_ADDR_26
0x472C US_ALU_RGB_ADDR_27
0x4730 US_ALU_RGB_ADDR_28
0x4734 US_ALU_RGB_ADDR_29
0x4738 US_ALU_RGB_ADDR_30
0x473C US_ALU_RGB_ADDR_31
0x4740 US_ALU_RGB_ADDR_32
0x4744 US_ALU_RGB_ADDR_33
0x4748 US_ALU_RGB_ADDR_34
0x474C US_ALU_RGB_ADDR_35
0x4750 US_ALU_RGB_ADDR_36
0x4754 US_ALU_RGB_ADDR_37
0x4758 US_ALU_RGB_ADDR_38
0x475C US_ALU_RGB_ADDR_39
0x4760 US_ALU_RGB_ADDR_40
0x4764 US_ALU_RGB_ADDR_41
0x4768 US_ALU_RGB_ADDR_42
0x476C US_ALU_RGB_ADDR_43
0x4770 US_ALU_RGB_ADDR_44
0x4774 US_ALU_RGB_ADDR_45
0x4778 US_ALU_RGB_ADDR_46
0x477C US_ALU_RGB_ADDR_47
0x4780 US_ALU_RGB_ADDR_48
0x4784 US_ALU_RGB_ADDR_49
0x4788 US_ALU_RGB_ADDR_50
0x478C US_ALU_RGB_ADDR_51
0x4790 US_ALU_RGB_ADDR_52
0x4794 US_ALU_RGB_ADDR_53
0x4798 US_ALU_RGB_ADDR_54
0x479C US_ALU_RGB_ADDR_55
0x47A0 US_ALU_RGB_ADDR_56
0x47A4 US_ALU_RGB_ADDR_57
0x47A8 US_ALU_RGB_ADDR_58
0x47AC US_ALU_RGB_ADDR_59
0x47B0 US_ALU_RGB_ADDR_60
0x47B4 US_ALU_RGB_ADDR_61
0x47B8 US_ALU_RGB_ADDR_62
0x47BC US_ALU_RGB_ADDR_63
0x47C0 US_ALU_ALPHA_ADDR_0
0x47C4 US_ALU_ALPHA_ADDR_1
0x47C8 US_ALU_ALPHA_ADDR_2
0x47CC US_ALU_ALPHA_ADDR_3
0x47D0 US_ALU_ALPHA_ADDR_4
0x47D4 US_ALU_ALPHA_ADDR_5
0x47D8 US_ALU_ALPHA_ADDR_6
0x47DC US_ALU_ALPHA_ADDR_7
0x47E0 US_ALU_ALPHA_ADDR_8
0x47E4 US_ALU_ALPHA_ADDR_9
0x47E8 US_ALU_ALPHA_ADDR_10
0x47EC US_ALU_ALPHA_ADDR_11
0x47F0 US_ALU_ALPHA_ADDR_12
0x47F4 US_ALU_ALPHA_ADDR_13
0x47F8 US_ALU_ALPHA_ADDR_14
0x47FC US_ALU_ALPHA_ADDR_15
0x4800 US_ALU_ALPHA_ADDR_16
0x4804 US_ALU_ALPHA_ADDR_17
0x4808 US_ALU_ALPHA_ADDR_18
0x480C US_ALU_ALPHA_ADDR_19
0x4810 US_ALU_ALPHA_ADDR_20
0x4814 US_ALU_ALPHA_ADDR_21
0x4818 US_ALU_ALPHA_ADDR_22
0x481C US_ALU_ALPHA_ADDR_23
0x4820 US_ALU_ALPHA_ADDR_24
0x4824 US_ALU_ALPHA_ADDR_25
0x4828 US_ALU_ALPHA_ADDR_26
0x482C US_ALU_ALPHA_ADDR_27
0x4830 US_ALU_ALPHA_ADDR_28
0x4834 US_ALU_ALPHA_ADDR_29
0x4838 US_ALU_ALPHA_ADDR_30
0x483C US_ALU_ALPHA_ADDR_31
0x4840 US_ALU_ALPHA_ADDR_32
0x4844 US_ALU_ALPHA_ADDR_33
0x4848 US_ALU_ALPHA_ADDR_34
0x484C US_ALU_ALPHA_ADDR_35
0x4850 US_ALU_ALPHA_ADDR_36
0x4854 US_ALU_ALPHA_ADDR_37
0x4858 US_ALU_ALPHA_ADDR_38
0x485C US_ALU_ALPHA_ADDR_39
0x4860 US_ALU_ALPHA_ADDR_40
0x4864 US_ALU_ALPHA_ADDR_41
0x4868 US_ALU_ALPHA_ADDR_42
0x486C US_ALU_ALPHA_ADDR_43
0x4870 US_ALU_ALPHA_ADDR_44
0x4874 US_ALU_ALPHA_ADDR_45
0x4878 US_ALU_ALPHA_ADDR_46
0x487C US_ALU_ALPHA_ADDR_47
0x4880 US_ALU_ALPHA_ADDR_48
0x4884 US_ALU_ALPHA_ADDR_49
0x4888 US_ALU_ALPHA_ADDR_50
0x488C US_ALU_ALPHA_ADDR_51
0x4890 US_ALU_ALPHA_ADDR_52
0x4894 US_ALU_ALPHA_ADDR_53
0x4898 US_ALU_ALPHA_ADDR_54
0x489C US_ALU_ALPHA_ADDR_55
0x48A0 US_ALU_ALPHA_ADDR_56
0x48A4 US_ALU_ALPHA_ADDR_57
0x48A8 US_ALU_ALPHA_ADDR_58
0x48AC US_ALU_ALPHA_ADDR_59
0x48B0 US_ALU_ALPHA_ADDR_60
0x48B4 US_ALU_ALPHA_ADDR_61
0x48B8 US_ALU_ALPHA_ADDR_62
0x48BC US_ALU_ALPHA_ADDR_63
0x48C0 US_ALU_RGB_INST_0
0x48C4 US_ALU_RGB_INST_1
0x48C8 US_ALU_RGB_INST_2
0x48CC US_ALU_RGB_INST_3
0x48D0 US_ALU_RGB_INST_4
0x48D4 US_ALU_RGB_INST_5
0x48D8 US_ALU_RGB_INST_6
0x48DC US_ALU_RGB_INST_7
0x48E0 US_ALU_RGB_INST_8
0x48E4 US_ALU_RGB_INST_9
0x48E8 US_ALU_RGB_INST_10
0x48EC US_ALU_RGB_INST_11
0x48F0 US_ALU_RGB_INST_12
0x48F4 US_ALU_RGB_INST_13
0x48F8 US_ALU_RGB_INST_14
0x48FC US_ALU_RGB_INST_15
0x4900 US_ALU_RGB_INST_16
0x4904 US_ALU_RGB_INST_17
0x4908 US_ALU_RGB_INST_18
0x490C US_ALU_RGB_INST_19
0x4910 US_ALU_RGB_INST_20
0x4914 US_ALU_RGB_INST_21
0x4918 US_ALU_RGB_INST_22
0x491C US_ALU_RGB_INST_23
0x4920 US_ALU_RGB_INST_24
0x4924 US_ALU_RGB_INST_25
0x4928 US_ALU_RGB_INST_26
0x492C US_ALU_RGB_INST_27
0x4930 US_ALU_RGB_INST_28
0x4934 US_ALU_RGB_INST_29
0x4938 US_ALU_RGB_INST_30
0x493C US_ALU_RGB_INST_31
0x4940 US_ALU_RGB_INST_32
0x4944 US_ALU_RGB_INST_33
0x4948 US_ALU_RGB_INST_34
0x494C US_ALU_RGB_INST_35
0x4950 US_ALU_RGB_INST_36
0x4954 US_ALU_RGB_INST_37
0x4958 US_ALU_RGB_INST_38
0x495C US_ALU_RGB_INST_39
0x4960 US_ALU_RGB_INST_40
0x4964 US_ALU_RGB_INST_41
0x4968 US_ALU_RGB_INST_42
0x496C US_ALU_RGB_INST_43
0x4970 US_ALU_RGB_INST_44
0x4974 US_ALU_RGB_INST_45
0x4978 US_ALU_RGB_INST_46
0x497C US_ALU_RGB_INST_47
0x4980 US_ALU_RGB_INST_48
0x4984 US_ALU_RGB_INST_49
0x4988 US_ALU_RGB_INST_50
0x498C US_ALU_RGB_INST_51
0x4990 US_ALU_RGB_INST_52
0x4994 US_ALU_RGB_INST_53
0x4998 US_ALU_RGB_INST_54
0x499C US_ALU_RGB_INST_55
0x49A0 US_ALU_RGB_INST_56
0x49A4 US_ALU_RGB_INST_57
0x49A8 US_ALU_RGB_INST_58
0x49AC US_ALU_RGB_INST_59
0x49B0 US_ALU_RGB_INST_60
0x49B4 US_ALU_RGB_INST_61
0x49B8 US_ALU_RGB_INST_62
0x49BC US_ALU_RGB_INST_63
0x49C0 US_ALU_ALPHA_INST_0
0x49C4 US_ALU_ALPHA_INST_1
0x49C8 US_ALU_ALPHA_INST_2
0x49CC US_ALU_ALPHA_INST_3
0x49D0 US_ALU_ALPHA_INST_4
0x49D4 US_ALU_ALPHA_INST_5
0x49D8 US_ALU_ALPHA_INST_6
0x49DC US_ALU_ALPHA_INST_7
0x49E0 US_ALU_ALPHA_INST_8
0x49E4 US_ALU_ALPHA_INST_9
0x49E8 US_ALU_ALPHA_INST_10
0x49EC US_ALU_ALPHA_INST_11
0x49F0 US_ALU_ALPHA_INST_12
0x49F4 US_ALU_ALPHA_INST_13
0x49F8 US_ALU_ALPHA_INST_14
0x49FC US_ALU_ALPHA_INST_15
0x4A00 US_ALU_ALPHA_INST_16
0x4A04 US_ALU_ALPHA_INST_17
0x4A08 US_ALU_ALPHA_INST_18
0x4A0C US_ALU_ALPHA_INST_19
0x4A10 US_ALU_ALPHA_INST_20
0x4A14 US_ALU_ALPHA_INST_21
0x4A18 US_ALU_ALPHA_INST_22
0x4A1C US_ALU_ALPHA_INST_23
0x4A20 US_ALU_ALPHA_INST_24
0x4A24 US_ALU_ALPHA_INST_25
0x4A28 US_ALU_ALPHA_INST_26
0x4A2C US_ALU_ALPHA_INST_27
0x4A30 US_ALU_ALPHA_INST_28
0x4A34 US_ALU_ALPHA_INST_29
0x4A38 US_ALU_ALPHA_INST_30
0x4A3C US_ALU_ALPHA_INST_31
0x4A40 US_ALU_ALPHA_INST_32
0x4A44 US_ALU_ALPHA_INST_33
0x4A48 US_ALU_ALPHA_INST_34
0x4A4C US_ALU_ALPHA_INST_35
0x4A50 US_ALU_ALPHA_INST_36
0x4A54 US_ALU_ALPHA_INST_37
0x4A58 US_ALU_ALPHA_INST_38
0x4A5C US_ALU_ALPHA_INST_39
0x4A60 US_ALU_ALPHA_INST_40
0x4A64 US_ALU_ALPHA_INST_41
0x4A68 US_ALU_ALPHA_INST_42
0x4A6C US_ALU_ALPHA_INST_43
0x4A70 US_ALU_ALPHA_INST_44
0x4A74 US_ALU_ALPHA_INST_45
0x4A78 US_ALU_ALPHA_INST_46
0x4A7C US_ALU_ALPHA_INST_47
0x4A80 US_ALU_ALPHA_INST_48
0x4A84 US_ALU_ALPHA_INST_49
0x4A88 US_ALU_ALPHA_INST_50
0x4A8C US_ALU_ALPHA_INST_51
0x4A90 US_ALU_ALPHA_INST_52
0x4A94 US_ALU_ALPHA_INST_53
0x4A98 US_ALU_ALPHA_INST_54
0x4A9C US_ALU_ALPHA_INST_55
0x4AA0 US_ALU_ALPHA_INST_56
0x4AA4 US_ALU_ALPHA_INST_57
0x4AA8 US_ALU_ALPHA_INST_58
0x4AAC US_ALU_ALPHA_INST_59
0x4AB0 US_ALU_ALPHA_INST_60
0x4AB4 US_ALU_ALPHA_INST_61
0x4AB8 US_ALU_ALPHA_INST_62
0x4ABC US_ALU_ALPHA_INST_63
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
0x4BCC FG_FOG_COLOR_G
0x4BD0 FG_FOG_COLOR_B
0x4BD4 FG_ALPHA_FUNC
0x4BD8 FG_DEPTH_SRC
0x4C00 US_ALU_CONST_R_0
0x4C04 US_ALU_CONST_G_0
0x4C08 US_ALU_CONST_B_0
0x4C0C US_ALU_CONST_A_0
0x4C10 US_ALU_CONST_R_1
0x4C14 US_ALU_CONST_G_1
0x4C18 US_ALU_CONST_B_1
0x4C1C US_ALU_CONST_A_1
0x4C20 US_ALU_CONST_R_2
0x4C24 US_ALU_CONST_G_2
0x4C28 US_ALU_CONST_B_2
0x4C2C US_ALU_CONST_A_2
0x4C30 US_ALU_CONST_R_3
0x4C34 US_ALU_CONST_G_3
0x4C38 US_ALU_CONST_B_3
0x4C3C US_ALU_CONST_A_3
0x4C40 US_ALU_CONST_R_4
0x4C44 US_ALU_CONST_G_4
0x4C48 US_ALU_CONST_B_4
0x4C4C US_ALU_CONST_A_4
0x4C50 US_ALU_CONST_R_5
0x4C54 US_ALU_CONST_G_5
0x4C58 US_ALU_CONST_B_5
0x4C5C US_ALU_CONST_A_5
0x4C60 US_ALU_CONST_R_6
0x4C64 US_ALU_CONST_G_6
0x4C68 US_ALU_CONST_B_6
0x4C6C US_ALU_CONST_A_6
0x4C70 US_ALU_CONST_R_7
0x4C74 US_ALU_CONST_G_7
0x4C78 US_ALU_CONST_B_7
0x4C7C US_ALU_CONST_A_7
0x4C80 US_ALU_CONST_R_8
0x4C84 US_ALU_CONST_G_8
0x4C88 US_ALU_CONST_B_8
0x4C8C US_ALU_CONST_A_8
0x4C90 US_ALU_CONST_R_9
0x4C94 US_ALU_CONST_G_9
0x4C98 US_ALU_CONST_B_9
0x4C9C US_ALU_CONST_A_9
0x4CA0 US_ALU_CONST_R_10
0x4CA4 US_ALU_CONST_G_10
0x4CA8 US_ALU_CONST_B_10
0x4CAC US_ALU_CONST_A_10
0x4CB0 US_ALU_CONST_R_11
0x4CB4 US_ALU_CONST_G_11
0x4CB8 US_ALU_CONST_B_11
0x4CBC US_ALU_CONST_A_11
0x4CC0 US_ALU_CONST_R_12
0x4CC4 US_ALU_CONST_G_12
0x4CC8 US_ALU_CONST_B_12
0x4CCC US_ALU_CONST_A_12
0x4CD0 US_ALU_CONST_R_13
0x4CD4 US_ALU_CONST_G_13
0x4CD8 US_ALU_CONST_B_13
0x4CDC US_ALU_CONST_A_13
0x4CE0 US_ALU_CONST_R_14
0x4CE4 US_ALU_CONST_G_14
0x4CE8 US_ALU_CONST_B_14
0x4CEC US_ALU_CONST_A_14
0x4CF0 US_ALU_CONST_R_15
0x4CF4 US_ALU_CONST_G_15
0x4CF8 US_ALU_CONST_B_15
0x4CFC US_ALU_CONST_A_15
0x4D00 US_ALU_CONST_R_16
0x4D04 US_ALU_CONST_G_16
0x4D08 US_ALU_CONST_B_16
0x4D0C US_ALU_CONST_A_16
0x4D10 US_ALU_CONST_R_17
0x4D14 US_ALU_CONST_G_17
0x4D18 US_ALU_CONST_B_17
0x4D1C US_ALU_CONST_A_17
0x4D20 US_ALU_CONST_R_18
0x4D24 US_ALU_CONST_G_18
0x4D28 US_ALU_CONST_B_18
0x4D2C US_ALU_CONST_A_18
0x4D30 US_ALU_CONST_R_19
0x4D34 US_ALU_CONST_G_19
0x4D38 US_ALU_CONST_B_19
0x4D3C US_ALU_CONST_A_19
0x4D40 US_ALU_CONST_R_20
0x4D44 US_ALU_CONST_G_20
0x4D48 US_ALU_CONST_B_20
0x4D4C US_ALU_CONST_A_20
0x4D50 US_ALU_CONST_R_21
0x4D54 US_ALU_CONST_G_21
0x4D58 US_ALU_CONST_B_21
0x4D5C US_ALU_CONST_A_21
0x4D60 US_ALU_CONST_R_22
0x4D64 US_ALU_CONST_G_22
0x4D68 US_ALU_CONST_B_22
0x4D6C US_ALU_CONST_A_22
0x4D70 US_ALU_CONST_R_23
0x4D74 US_ALU_CONST_G_23
0x4D78 US_ALU_CONST_B_23
0x4D7C US_ALU_CONST_A_23
0x4D80 US_ALU_CONST_R_24
0x4D84 US_ALU_CONST_G_24
0x4D88 US_ALU_CONST_B_24
0x4D8C US_ALU_CONST_A_24
0x4D90 US_ALU_CONST_R_25
0x4D94 US_ALU_CONST_G_25
0x4D98 US_ALU_CONST_B_25
0x4D9C US_ALU_CONST_A_25
0x4DA0 US_ALU_CONST_R_26
0x4DA4 US_ALU_CONST_G_26
0x4DA8 US_ALU_CONST_B_26
0x4DAC US_ALU_CONST_A_26
0x4DB0 US_ALU_CONST_R_27
0x4DB4 US_ALU_CONST_G_27
0x4DB8 US_ALU_CONST_B_27
0x4DBC US_ALU_CONST_A_27
0x4DC0 US_ALU_CONST_R_28
0x4DC4 US_ALU_CONST_G_28
0x4DC8 US_ALU_CONST_B_28
0x4DCC US_ALU_CONST_A_28
0x4DD0 US_ALU_CONST_R_29
0x4DD4 US_ALU_CONST_G_29
0x4DD8 US_ALU_CONST_B_29
0x4DDC US_ALU_CONST_A_29
0x4DE0 US_ALU_CONST_R_30
0x4DE4 US_ALU_CONST_G_30
0x4DE8 US_ALU_CONST_B_30
0x4DEC US_ALU_CONST_A_30
0x4DF0 US_ALU_CONST_R_31
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
0x4E08 RB3D_ABLENDCNTL_R3
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
0x4E1C RB3D_CLRCMP_FLIPE_R3
0x4E20 RB3D_CLRCMP_CLR_R3
0x4E24 RB3D_CLRCMP_MSK_R3
0x4E48 RB3D_DEBUG_CTL
0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
0x4E50 RB3D_DITHER_CTL
0x4E54 RB3D_CMASK_OFFSET0
0x4E58 RB3D_CMASK_OFFSET1
0x4E5C RB3D_CMASK_OFFSET2
0x4E60 RB3D_CMASK_OFFSET3
0x4E64 RB3D_CMASK_PITCH0
0x4E68 RB3D_CMASK_PITCH1
0x4E6C RB3D_CMASK_PITCH2
0x4E70 RB3D_CMASK_PITCH3
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
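
Each reg_srcs file added in this revision follows the same two-part layout: a header line giving the chip name and a hex register offset (e.g. "r420 0x4f60" below), followed by one "0xOFFSET NAME" pair per register that userspace command streams may write without further validation. In the upstream Linux radeon tree these lists are fed through mkregtable.c at build time to emit a per-chip <chip>_reg_safe_bm bitmap; this revision does not show the KolibriOS generator, so the following is only a minimal sketch under the assumption that the same conventions apply (set bit = register needs inspection, cleared bit = listed and safe):

/* Sketch only: turn a reg_srcs-style list into a safe-register bitmap,
 * assuming the upstream mkregtable.c conventions (header "<chip> <last_reg>",
 * then "0xOFFSET NAME" lines). Not the actual build tooling of this tree. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_REGS 2048

int main(int argc, char **argv)
{
	char chip[16], line[128];
	unsigned offs[MAX_REGS], n = 0, last, max, words, off, i;
	unsigned *bm;
	FILE *f;

	if (argc < 2 || !(f = fopen(argv[1], "r")))
		return 1;
	if (fscanf(f, "%15s %x", chip, &last) != 2)
		return 1;
	max = last;
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, " 0x%x", &off) != 1 || n == MAX_REGS)
			continue;	/* skip blank lines */
		offs[n++] = off;
		if (off > max)
			max = off;	/* the table must reach the highest entry */
	}
	fclose(f);
	words = (max / 4) / 32 + 1;	/* one bit per 32-bit register */
	bm = malloc(words * 4);
	memset(bm, 0xff, words * 4);	/* default: every register needs a check */
	for (i = 0; i < n; i++)		/* clear the bit for each listed (safe) reg */
		bm[(offs[i] / 4) / 32] &= ~(1u << ((offs[i] / 4) % 32));
	printf("static const unsigned %s_reg_safe_bm[%u] = {\n", chip, words);
	for (i = 0; i < words; i++)
		printf("\t0x%08X,%s", bm[i], (i % 4 == 3) ? "\n" : "");
	printf("\n};\n");
	free(bm);
	return 0;
}

Run against the r420 list below, this would emit a 159-word table; there the header value, not the highest listed offset (0x4F58), sets the size.
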
/drivers/video/drm/radeon/reg_srcs/r420
0,0 → 1,780
r420 0x4f60
0x1434 SRC_Y_X
0x1438 DST_Y_X
0x143C DST_HEIGHT_WIDTH
0x146C DP_GUI_MASTER_CNTL
0x1474 BRUSH_Y_X
0x1478 DP_BRUSH_BKGD_CLR
0x147C DP_BRUSH_FRGD_CLR
0x1480 BRUSH_DATA0
0x1484 BRUSH_DATA1
0x1598 DST_WIDTH_HEIGHT
0x15C0 CLR_CMP_CNTL
0x15C4 CLR_CMP_CLR_SRC
0x15C8 CLR_CMP_CLR_DST
0x15CC CLR_CMP_MSK
0x15D8 DP_SRC_FRGD_CLR
0x15DC DP_SRC_BKGD_CLR
0x1600 DST_LINE_START
0x1604 DST_LINE_END
0x1608 DST_LINE_PATCOUNT
0x16C0 DP_CNTL
0x16CC DP_WRITE_MSK
0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
0x16E8 DEFAULT_SC_BOTTOM_RIGHT
0x16EC SC_TOP_LEFT
0x16F0 SC_BOTTOM_RIGHT
0x16F4 SRC_SC_BOTTOM_RIGHT
0x1714 DSTCACHE_CTLSTAT
0x1720 WAIT_UNTIL
0x172C RBBM_GUICNTL
0x1D98 VAP_VPORT_XSCALE
0x1D9C VAP_VPORT_XOFFSET
0x1DA0 VAP_VPORT_YSCALE
0x1DA4 VAP_VPORT_YOFFSET
0x1DA8 VAP_VPORT_ZSCALE
0x1DAC VAP_VPORT_ZOFFSET
0x2080 VAP_CNTL
0x2090 VAP_OUT_VTX_FMT_0
0x2094 VAP_OUT_VTX_FMT_1
0x20B0 VAP_VTE_CNTL
0x2138 VAP_VF_MIN_VTX_INDX
0x2140 VAP_CNTL_STATUS
0x2150 VAP_PROG_STREAM_CNTL_0
0x2154 VAP_PROG_STREAM_CNTL_1
0x2158 VAP_PROG_STREAM_CNTL_2
0x215C VAP_PROG_STREAM_CNTL_3
0x2160 VAP_PROG_STREAM_CNTL_4
0x2164 VAP_PROG_STREAM_CNTL_5
0x2168 VAP_PROG_STREAM_CNTL_6
0x216C VAP_PROG_STREAM_CNTL_7
0x2180 VAP_VTX_STATE_CNTL
0x2184 VAP_VSM_VTX_ASSM
0x2188 VAP_VTX_STATE_IND_REG_0
0x218C VAP_VTX_STATE_IND_REG_1
0x2190 VAP_VTX_STATE_IND_REG_2
0x2194 VAP_VTX_STATE_IND_REG_3
0x2198 VAP_VTX_STATE_IND_REG_4
0x219C VAP_VTX_STATE_IND_REG_5
0x21A0 VAP_VTX_STATE_IND_REG_6
0x21A4 VAP_VTX_STATE_IND_REG_7
0x21A8 VAP_VTX_STATE_IND_REG_8
0x21AC VAP_VTX_STATE_IND_REG_9
0x21B0 VAP_VTX_STATE_IND_REG_10
0x21B4 VAP_VTX_STATE_IND_REG_11
0x21B8 VAP_VTX_STATE_IND_REG_12
0x21BC VAP_VTX_STATE_IND_REG_13
0x21C0 VAP_VTX_STATE_IND_REG_14
0x21C4 VAP_VTX_STATE_IND_REG_15
0x21DC VAP_PSC_SGN_NORM_CNTL
0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
0x21EC VAP_PROG_STREAM_CNTL_EXT_3
0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
0x21FC VAP_PROG_STREAM_CNTL_EXT_7
0x2200 VAP_PVS_VECTOR_INDX_REG
0x2204 VAP_PVS_VECTOR_DATA_REG
0x2208 VAP_PVS_VECTOR_DATA_REG_128
0x221C VAP_CLIP_CNTL
0x2220 VAP_GB_VERT_CLIP_ADJ
0x2224 VAP_GB_VERT_DISC_ADJ
0x2228 VAP_GB_HORZ_CLIP_ADJ
0x222C VAP_GB_HORZ_DISC_ADJ
0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
0x2284 VAP_PVS_STATE_FLUSH_REG
0x2288 VAP_PVS_VTX_TIMEOUT_REG
0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
0x22D0 VAP_PVS_CODE_CNTL_0
0x22D4 VAP_PVS_CONST_CNTL
0x22D8 VAP_PVS_CODE_CNTL_1
0x22DC VAP_PVS_FLOW_CNTL_OPC
0x342C RB2D_DSTCACHE_CTLSTAT
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
0x4010 GB_MSPOS0
0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
0x4100 TX_INVALTAGS
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
0x4208 GA_POINT_S1
0x420C GA_POINT_T1
0x4214 GA_TRIANGLE_STIPPLE
0x421C GA_POINT_SIZE
0x4230 GA_POINT_MINMAX
0x4234 GA_LINE_CNTL
0x4238 GA_LINE_STIPPLE_CONFIG
0x4260 GA_LINE_STIPPLE_VALUE
0x4264 GA_LINE_S0
0x4268 GA_LINE_S1
0x4278 GA_COLOR_CONTROL
0x427C GA_SOLID_RG
0x4280 GA_SOLID_BA
0x4288 GA_POLY_MODE
0x428C GA_ROUND_MODE
0x4290 GA_OFFSET
0x4294 GA_FOG_SCALE
0x4298 GA_FOG_OFFSET
0x42A0 SU_TEX_WRAP
0x42A4 SU_POLY_OFFSET_FRONT_SCALE
0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
0x42AC SU_POLY_OFFSET_BACK_SCALE
0x42B0 SU_POLY_OFFSET_BACK_OFFSET
0x42B4 SU_POLY_OFFSET_ENABLE
0x42B8 SU_CULL_MODE
0x42C0 SU_DEPTH_SCALE
0x42C4 SU_DEPTH_OFFSET
0x42C8 SU_REG_DEST
0x4300 RS_COUNT
0x4304 RS_INST_COUNT
0x4310 RS_IP_0
0x4314 RS_IP_1
0x4318 RS_IP_2
0x431C RS_IP_3
0x4320 RS_IP_4
0x4324 RS_IP_5
0x4328 RS_IP_6
0x432C RS_IP_7
0x4330 RS_INST_0
0x4334 RS_INST_1
0x4338 RS_INST_2
0x433C RS_INST_3
0x4340 RS_INST_4
0x4344 RS_INST_5
0x4348 RS_INST_6
0x434C RS_INST_7
0x4350 RS_INST_8
0x4354 RS_INST_9
0x4358 RS_INST_10
0x435C RS_INST_11
0x4360 RS_INST_12
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
0x43B8 SC_CLIP_1_A
0x43BC SC_CLIP_1_B
0x43C0 SC_CLIP_2_A
0x43C4 SC_CLIP_2_B
0x43C8 SC_CLIP_3_A
0x43CC SC_CLIP_3_B
0x43D0 SC_CLIP_RULE
0x43E0 SC_SCISSOR0
0x43E8 SC_SCREENDOOR
0x4440 TX_FILTER1_0
0x4444 TX_FILTER1_1
0x4448 TX_FILTER1_2
0x444C TX_FILTER1_3
0x4450 TX_FILTER1_4
0x4454 TX_FILTER1_5
0x4458 TX_FILTER1_6
0x445C TX_FILTER1_7
0x4460 TX_FILTER1_8
0x4464 TX_FILTER1_9
0x4468 TX_FILTER1_10
0x446C TX_FILTER1_11
0x4470 TX_FILTER1_12
0x4474 TX_FILTER1_13
0x4478 TX_FILTER1_14
0x447C TX_FILTER1_15
0x4580 TX_CHROMA_KEY_0
0x4584 TX_CHROMA_KEY_1
0x4588 TX_CHROMA_KEY_2
0x458C TX_CHROMA_KEY_3
0x4590 TX_CHROMA_KEY_4
0x4594 TX_CHROMA_KEY_5
0x4598 TX_CHROMA_KEY_6
0x459C TX_CHROMA_KEY_7
0x45A0 TX_CHROMA_KEY_8
0x45A4 TX_CHROMA_KEY_9
0x45A8 TX_CHROMA_KEY_10
0x45AC TX_CHROMA_KEY_11
0x45B0 TX_CHROMA_KEY_12
0x45B4 TX_CHROMA_KEY_13
0x45B8 TX_CHROMA_KEY_14
0x45BC TX_CHROMA_KEY_15
0x45C0 TX_BORDER_COLOR_0
0x45C4 TX_BORDER_COLOR_1
0x45C8 TX_BORDER_COLOR_2
0x45CC TX_BORDER_COLOR_3
0x45D0 TX_BORDER_COLOR_4
0x45D4 TX_BORDER_COLOR_5
0x45D8 TX_BORDER_COLOR_6
0x45DC TX_BORDER_COLOR_7
0x45E0 TX_BORDER_COLOR_8
0x45E4 TX_BORDER_COLOR_9
0x45E8 TX_BORDER_COLOR_10
0x45EC TX_BORDER_COLOR_11
0x45F0 TX_BORDER_COLOR_12
0x45F4 TX_BORDER_COLOR_13
0x45F8 TX_BORDER_COLOR_14
0x45FC TX_BORDER_COLOR_15
0x4600 US_CONFIG
0x4604 US_PIXSIZE
0x4608 US_CODE_OFFSET
0x460C US_RESET
0x4610 US_CODE_ADDR_0
0x4614 US_CODE_ADDR_1
0x4618 US_CODE_ADDR_2
0x461C US_CODE_ADDR_3
0x4620 US_TEX_INST_0
0x4624 US_TEX_INST_1
0x4628 US_TEX_INST_2
0x462C US_TEX_INST_3
0x4630 US_TEX_INST_4
0x4634 US_TEX_INST_5
0x4638 US_TEX_INST_6
0x463C US_TEX_INST_7
0x4640 US_TEX_INST_8
0x4644 US_TEX_INST_9
0x4648 US_TEX_INST_10
0x464C US_TEX_INST_11
0x4650 US_TEX_INST_12
0x4654 US_TEX_INST_13
0x4658 US_TEX_INST_14
0x465C US_TEX_INST_15
0x4660 US_TEX_INST_16
0x4664 US_TEX_INST_17
0x4668 US_TEX_INST_18
0x466C US_TEX_INST_19
0x4670 US_TEX_INST_20
0x4674 US_TEX_INST_21
0x4678 US_TEX_INST_22
0x467C US_TEX_INST_23
0x4680 US_TEX_INST_24
0x4684 US_TEX_INST_25
0x4688 US_TEX_INST_26
0x468C US_TEX_INST_27
0x4690 US_TEX_INST_28
0x4694 US_TEX_INST_29
0x4698 US_TEX_INST_30
0x469C US_TEX_INST_31
0x46A4 US_OUT_FMT_0
0x46A8 US_OUT_FMT_1
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
0x46B8 US_CODE_BANK
0x46BC US_CODE_EXT
0x46C0 US_ALU_RGB_ADDR_0
0x46C4 US_ALU_RGB_ADDR_1
0x46C8 US_ALU_RGB_ADDR_2
0x46CC US_ALU_RGB_ADDR_3
0x46D0 US_ALU_RGB_ADDR_4
0x46D4 US_ALU_RGB_ADDR_5
0x46D8 US_ALU_RGB_ADDR_6
0x46DC US_ALU_RGB_ADDR_7
0x46E0 US_ALU_RGB_ADDR_8
0x46E4 US_ALU_RGB_ADDR_9
0x46E8 US_ALU_RGB_ADDR_10
0x46EC US_ALU_RGB_ADDR_11
0x46F0 US_ALU_RGB_ADDR_12
0x46F4 US_ALU_RGB_ADDR_13
0x46F8 US_ALU_RGB_ADDR_14
0x46FC US_ALU_RGB_ADDR_15
0x4700 US_ALU_RGB_ADDR_16
0x4704 US_ALU_RGB_ADDR_17
0x4708 US_ALU_RGB_ADDR_18
0x470C US_ALU_RGB_ADDR_19
0x4710 US_ALU_RGB_ADDR_20
0x4714 US_ALU_RGB_ADDR_21
0x4718 US_ALU_RGB_ADDR_22
0x471C US_ALU_RGB_ADDR_23
0x4720 US_ALU_RGB_ADDR_24
0x4724 US_ALU_RGB_ADDR_25
0x4728 US_ALU_RGB_ADDR_26
0x472C US_ALU_RGB_ADDR_27
0x4730 US_ALU_RGB_ADDR_28
0x4734 US_ALU_RGB_ADDR_29
0x4738 US_ALU_RGB_ADDR_30
0x473C US_ALU_RGB_ADDR_31
0x4740 US_ALU_RGB_ADDR_32
0x4744 US_ALU_RGB_ADDR_33
0x4748 US_ALU_RGB_ADDR_34
0x474C US_ALU_RGB_ADDR_35
0x4750 US_ALU_RGB_ADDR_36
0x4754 US_ALU_RGB_ADDR_37
0x4758 US_ALU_RGB_ADDR_38
0x475C US_ALU_RGB_ADDR_39
0x4760 US_ALU_RGB_ADDR_40
0x4764 US_ALU_RGB_ADDR_41
0x4768 US_ALU_RGB_ADDR_42
0x476C US_ALU_RGB_ADDR_43
0x4770 US_ALU_RGB_ADDR_44
0x4774 US_ALU_RGB_ADDR_45
0x4778 US_ALU_RGB_ADDR_46
0x477C US_ALU_RGB_ADDR_47
0x4780 US_ALU_RGB_ADDR_48
0x4784 US_ALU_RGB_ADDR_49
0x4788 US_ALU_RGB_ADDR_50
0x478C US_ALU_RGB_ADDR_51
0x4790 US_ALU_RGB_ADDR_52
0x4794 US_ALU_RGB_ADDR_53
0x4798 US_ALU_RGB_ADDR_54
0x479C US_ALU_RGB_ADDR_55
0x47A0 US_ALU_RGB_ADDR_56
0x47A4 US_ALU_RGB_ADDR_57
0x47A8 US_ALU_RGB_ADDR_58
0x47AC US_ALU_RGB_ADDR_59
0x47B0 US_ALU_RGB_ADDR_60
0x47B4 US_ALU_RGB_ADDR_61
0x47B8 US_ALU_RGB_ADDR_62
0x47BC US_ALU_RGB_ADDR_63
0x47C0 US_ALU_ALPHA_ADDR_0
0x47C4 US_ALU_ALPHA_ADDR_1
0x47C8 US_ALU_ALPHA_ADDR_2
0x47CC US_ALU_ALPHA_ADDR_3
0x47D0 US_ALU_ALPHA_ADDR_4
0x47D4 US_ALU_ALPHA_ADDR_5
0x47D8 US_ALU_ALPHA_ADDR_6
0x47DC US_ALU_ALPHA_ADDR_7
0x47E0 US_ALU_ALPHA_ADDR_8
0x47E4 US_ALU_ALPHA_ADDR_9
0x47E8 US_ALU_ALPHA_ADDR_10
0x47EC US_ALU_ALPHA_ADDR_11
0x47F0 US_ALU_ALPHA_ADDR_12
0x47F4 US_ALU_ALPHA_ADDR_13
0x47F8 US_ALU_ALPHA_ADDR_14
0x47FC US_ALU_ALPHA_ADDR_15
0x4800 US_ALU_ALPHA_ADDR_16
0x4804 US_ALU_ALPHA_ADDR_17
0x4808 US_ALU_ALPHA_ADDR_18
0x480C US_ALU_ALPHA_ADDR_19
0x4810 US_ALU_ALPHA_ADDR_20
0x4814 US_ALU_ALPHA_ADDR_21
0x4818 US_ALU_ALPHA_ADDR_22
0x481C US_ALU_ALPHA_ADDR_23
0x4820 US_ALU_ALPHA_ADDR_24
0x4824 US_ALU_ALPHA_ADDR_25
0x4828 US_ALU_ALPHA_ADDR_26
0x482C US_ALU_ALPHA_ADDR_27
0x4830 US_ALU_ALPHA_ADDR_28
0x4834 US_ALU_ALPHA_ADDR_29
0x4838 US_ALU_ALPHA_ADDR_30
0x483C US_ALU_ALPHA_ADDR_31
0x4840 US_ALU_ALPHA_ADDR_32
0x4844 US_ALU_ALPHA_ADDR_33
0x4848 US_ALU_ALPHA_ADDR_34
0x484C US_ALU_ALPHA_ADDR_35
0x4850 US_ALU_ALPHA_ADDR_36
0x4854 US_ALU_ALPHA_ADDR_37
0x4858 US_ALU_ALPHA_ADDR_38
0x485C US_ALU_ALPHA_ADDR_39
0x4860 US_ALU_ALPHA_ADDR_40
0x4864 US_ALU_ALPHA_ADDR_41
0x4868 US_ALU_ALPHA_ADDR_42
0x486C US_ALU_ALPHA_ADDR_43
0x4870 US_ALU_ALPHA_ADDR_44
0x4874 US_ALU_ALPHA_ADDR_45
0x4878 US_ALU_ALPHA_ADDR_46
0x487C US_ALU_ALPHA_ADDR_47
0x4880 US_ALU_ALPHA_ADDR_48
0x4884 US_ALU_ALPHA_ADDR_49
0x4888 US_ALU_ALPHA_ADDR_50
0x488C US_ALU_ALPHA_ADDR_51
0x4890 US_ALU_ALPHA_ADDR_52
0x4894 US_ALU_ALPHA_ADDR_53
0x4898 US_ALU_ALPHA_ADDR_54
0x489C US_ALU_ALPHA_ADDR_55
0x48A0 US_ALU_ALPHA_ADDR_56
0x48A4 US_ALU_ALPHA_ADDR_57
0x48A8 US_ALU_ALPHA_ADDR_58
0x48AC US_ALU_ALPHA_ADDR_59
0x48B0 US_ALU_ALPHA_ADDR_60
0x48B4 US_ALU_ALPHA_ADDR_61
0x48B8 US_ALU_ALPHA_ADDR_62
0x48BC US_ALU_ALPHA_ADDR_63
0x48C0 US_ALU_RGB_INST_0
0x48C4 US_ALU_RGB_INST_1
0x48C8 US_ALU_RGB_INST_2
0x48CC US_ALU_RGB_INST_3
0x48D0 US_ALU_RGB_INST_4
0x48D4 US_ALU_RGB_INST_5
0x48D8 US_ALU_RGB_INST_6
0x48DC US_ALU_RGB_INST_7
0x48E0 US_ALU_RGB_INST_8
0x48E4 US_ALU_RGB_INST_9
0x48E8 US_ALU_RGB_INST_10
0x48EC US_ALU_RGB_INST_11
0x48F0 US_ALU_RGB_INST_12
0x48F4 US_ALU_RGB_INST_13
0x48F8 US_ALU_RGB_INST_14
0x48FC US_ALU_RGB_INST_15
0x4900 US_ALU_RGB_INST_16
0x4904 US_ALU_RGB_INST_17
0x4908 US_ALU_RGB_INST_18
0x490C US_ALU_RGB_INST_19
0x4910 US_ALU_RGB_INST_20
0x4914 US_ALU_RGB_INST_21
0x4918 US_ALU_RGB_INST_22
0x491C US_ALU_RGB_INST_23
0x4920 US_ALU_RGB_INST_24
0x4924 US_ALU_RGB_INST_25
0x4928 US_ALU_RGB_INST_26
0x492C US_ALU_RGB_INST_27
0x4930 US_ALU_RGB_INST_28
0x4934 US_ALU_RGB_INST_29
0x4938 US_ALU_RGB_INST_30
0x493C US_ALU_RGB_INST_31
0x4940 US_ALU_RGB_INST_32
0x4944 US_ALU_RGB_INST_33
0x4948 US_ALU_RGB_INST_34
0x494C US_ALU_RGB_INST_35
0x4950 US_ALU_RGB_INST_36
0x4954 US_ALU_RGB_INST_37
0x4958 US_ALU_RGB_INST_38
0x495C US_ALU_RGB_INST_39
0x4960 US_ALU_RGB_INST_40
0x4964 US_ALU_RGB_INST_41
0x4968 US_ALU_RGB_INST_42
0x496C US_ALU_RGB_INST_43
0x4970 US_ALU_RGB_INST_44
0x4974 US_ALU_RGB_INST_45
0x4978 US_ALU_RGB_INST_46
0x497C US_ALU_RGB_INST_47
0x4980 US_ALU_RGB_INST_48
0x4984 US_ALU_RGB_INST_49
0x4988 US_ALU_RGB_INST_50
0x498C US_ALU_RGB_INST_51
0x4990 US_ALU_RGB_INST_52
0x4994 US_ALU_RGB_INST_53
0x4998 US_ALU_RGB_INST_54
0x499C US_ALU_RGB_INST_55
0x49A0 US_ALU_RGB_INST_56
0x49A4 US_ALU_RGB_INST_57
0x49A8 US_ALU_RGB_INST_58
0x49AC US_ALU_RGB_INST_59
0x49B0 US_ALU_RGB_INST_60
0x49B4 US_ALU_RGB_INST_61
0x49B8 US_ALU_RGB_INST_62
0x49BC US_ALU_RGB_INST_63
0x49C0 US_ALU_ALPHA_INST_0
0x49C4 US_ALU_ALPHA_INST_1
0x49C8 US_ALU_ALPHA_INST_2
0x49CC US_ALU_ALPHA_INST_3
0x49D0 US_ALU_ALPHA_INST_4
0x49D4 US_ALU_ALPHA_INST_5
0x49D8 US_ALU_ALPHA_INST_6
0x49DC US_ALU_ALPHA_INST_7
0x49E0 US_ALU_ALPHA_INST_8
0x49E4 US_ALU_ALPHA_INST_9
0x49E8 US_ALU_ALPHA_INST_10
0x49EC US_ALU_ALPHA_INST_11
0x49F0 US_ALU_ALPHA_INST_12
0x49F4 US_ALU_ALPHA_INST_13
0x49F8 US_ALU_ALPHA_INST_14
0x49FC US_ALU_ALPHA_INST_15
0x4A00 US_ALU_ALPHA_INST_16
0x4A04 US_ALU_ALPHA_INST_17
0x4A08 US_ALU_ALPHA_INST_18
0x4A0C US_ALU_ALPHA_INST_19
0x4A10 US_ALU_ALPHA_INST_20
0x4A14 US_ALU_ALPHA_INST_21
0x4A18 US_ALU_ALPHA_INST_22
0x4A1C US_ALU_ALPHA_INST_23
0x4A20 US_ALU_ALPHA_INST_24
0x4A24 US_ALU_ALPHA_INST_25
0x4A28 US_ALU_ALPHA_INST_26
0x4A2C US_ALU_ALPHA_INST_27
0x4A30 US_ALU_ALPHA_INST_28
0x4A34 US_ALU_ALPHA_INST_29
0x4A38 US_ALU_ALPHA_INST_30
0x4A3C US_ALU_ALPHA_INST_31
0x4A40 US_ALU_ALPHA_INST_32
0x4A44 US_ALU_ALPHA_INST_33
0x4A48 US_ALU_ALPHA_INST_34
0x4A4C US_ALU_ALPHA_INST_35
0x4A50 US_ALU_ALPHA_INST_36
0x4A54 US_ALU_ALPHA_INST_37
0x4A58 US_ALU_ALPHA_INST_38
0x4A5C US_ALU_ALPHA_INST_39
0x4A60 US_ALU_ALPHA_INST_40
0x4A64 US_ALU_ALPHA_INST_41
0x4A68 US_ALU_ALPHA_INST_42
0x4A6C US_ALU_ALPHA_INST_43
0x4A70 US_ALU_ALPHA_INST_44
0x4A74 US_ALU_ALPHA_INST_45
0x4A78 US_ALU_ALPHA_INST_46
0x4A7C US_ALU_ALPHA_INST_47
0x4A80 US_ALU_ALPHA_INST_48
0x4A84 US_ALU_ALPHA_INST_49
0x4A88 US_ALU_ALPHA_INST_50
0x4A8C US_ALU_ALPHA_INST_51
0x4A90 US_ALU_ALPHA_INST_52
0x4A94 US_ALU_ALPHA_INST_53
0x4A98 US_ALU_ALPHA_INST_54
0x4A9C US_ALU_ALPHA_INST_55
0x4AA0 US_ALU_ALPHA_INST_56
0x4AA4 US_ALU_ALPHA_INST_57
0x4AA8 US_ALU_ALPHA_INST_58
0x4AAC US_ALU_ALPHA_INST_59
0x4AB0 US_ALU_ALPHA_INST_60
0x4AB4 US_ALU_ALPHA_INST_61
0x4AB8 US_ALU_ALPHA_INST_62
0x4ABC US_ALU_ALPHA_INST_63
0x4AC0 US_ALU_EXT_ADDR_0
0x4AC4 US_ALU_EXT_ADDR_1
0x4AC8 US_ALU_EXT_ADDR_2
0x4ACC US_ALU_EXT_ADDR_3
0x4AD0 US_ALU_EXT_ADDR_4
0x4AD4 US_ALU_EXT_ADDR_5
0x4AD8 US_ALU_EXT_ADDR_6
0x4ADC US_ALU_EXT_ADDR_7
0x4AE0 US_ALU_EXT_ADDR_8
0x4AE4 US_ALU_EXT_ADDR_9
0x4AE8 US_ALU_EXT_ADDR_10
0x4AEC US_ALU_EXT_ADDR_11
0x4AF0 US_ALU_EXT_ADDR_12
0x4AF4 US_ALU_EXT_ADDR_13
0x4AF8 US_ALU_EXT_ADDR_14
0x4AFC US_ALU_EXT_ADDR_15
0x4B00 US_ALU_EXT_ADDR_16
0x4B04 US_ALU_EXT_ADDR_17
0x4B08 US_ALU_EXT_ADDR_18
0x4B0C US_ALU_EXT_ADDR_19
0x4B10 US_ALU_EXT_ADDR_20
0x4B14 US_ALU_EXT_ADDR_21
0x4B18 US_ALU_EXT_ADDR_22
0x4B1C US_ALU_EXT_ADDR_23
0x4B20 US_ALU_EXT_ADDR_24
0x4B24 US_ALU_EXT_ADDR_25
0x4B28 US_ALU_EXT_ADDR_26
0x4B2C US_ALU_EXT_ADDR_27
0x4B30 US_ALU_EXT_ADDR_28
0x4B34 US_ALU_EXT_ADDR_29
0x4B38 US_ALU_EXT_ADDR_30
0x4B3C US_ALU_EXT_ADDR_31
0x4B40 US_ALU_EXT_ADDR_32
0x4B44 US_ALU_EXT_ADDR_33
0x4B48 US_ALU_EXT_ADDR_34
0x4B4C US_ALU_EXT_ADDR_35
0x4B50 US_ALU_EXT_ADDR_36
0x4B54 US_ALU_EXT_ADDR_37
0x4B58 US_ALU_EXT_ADDR_38
0x4B5C US_ALU_EXT_ADDR_39
0x4B60 US_ALU_EXT_ADDR_40
0x4B64 US_ALU_EXT_ADDR_41
0x4B68 US_ALU_EXT_ADDR_42
0x4B6C US_ALU_EXT_ADDR_43
0x4B70 US_ALU_EXT_ADDR_44
0x4B74 US_ALU_EXT_ADDR_45
0x4B78 US_ALU_EXT_ADDR_46
0x4B7C US_ALU_EXT_ADDR_47
0x4B80 US_ALU_EXT_ADDR_48
0x4B84 US_ALU_EXT_ADDR_49
0x4B88 US_ALU_EXT_ADDR_50
0x4B8C US_ALU_EXT_ADDR_51
0x4B90 US_ALU_EXT_ADDR_52
0x4B94 US_ALU_EXT_ADDR_53
0x4B98 US_ALU_EXT_ADDR_54
0x4B9C US_ALU_EXT_ADDR_55
0x4BA0 US_ALU_EXT_ADDR_56
0x4BA4 US_ALU_EXT_ADDR_57
0x4BA8 US_ALU_EXT_ADDR_58
0x4BAC US_ALU_EXT_ADDR_59
0x4BB0 US_ALU_EXT_ADDR_60
0x4BB4 US_ALU_EXT_ADDR_61
0x4BB8 US_ALU_EXT_ADDR_62
0x4BBC US_ALU_EXT_ADDR_63
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
0x4BCC FG_FOG_COLOR_G
0x4BD0 FG_FOG_COLOR_B
0x4BD4 FG_ALPHA_FUNC
0x4BD8 FG_DEPTH_SRC
0x4C00 US_ALU_CONST_R_0
0x4C04 US_ALU_CONST_G_0
0x4C08 US_ALU_CONST_B_0
0x4C0C US_ALU_CONST_A_0
0x4C10 US_ALU_CONST_R_1
0x4C14 US_ALU_CONST_G_1
0x4C18 US_ALU_CONST_B_1
0x4C1C US_ALU_CONST_A_1
0x4C20 US_ALU_CONST_R_2
0x4C24 US_ALU_CONST_G_2
0x4C28 US_ALU_CONST_B_2
0x4C2C US_ALU_CONST_A_2
0x4C30 US_ALU_CONST_R_3
0x4C34 US_ALU_CONST_G_3
0x4C38 US_ALU_CONST_B_3
0x4C3C US_ALU_CONST_A_3
0x4C40 US_ALU_CONST_R_4
0x4C44 US_ALU_CONST_G_4
0x4C48 US_ALU_CONST_B_4
0x4C4C US_ALU_CONST_A_4
0x4C50 US_ALU_CONST_R_5
0x4C54 US_ALU_CONST_G_5
0x4C58 US_ALU_CONST_B_5
0x4C5C US_ALU_CONST_A_5
0x4C60 US_ALU_CONST_R_6
0x4C64 US_ALU_CONST_G_6
0x4C68 US_ALU_CONST_B_6
0x4C6C US_ALU_CONST_A_6
0x4C70 US_ALU_CONST_R_7
0x4C74 US_ALU_CONST_G_7
0x4C78 US_ALU_CONST_B_7
0x4C7C US_ALU_CONST_A_7
0x4C80 US_ALU_CONST_R_8
0x4C84 US_ALU_CONST_G_8
0x4C88 US_ALU_CONST_B_8
0x4C8C US_ALU_CONST_A_8
0x4C90 US_ALU_CONST_R_9
0x4C94 US_ALU_CONST_G_9
0x4C98 US_ALU_CONST_B_9
0x4C9C US_ALU_CONST_A_9
0x4CA0 US_ALU_CONST_R_10
0x4CA4 US_ALU_CONST_G_10
0x4CA8 US_ALU_CONST_B_10
0x4CAC US_ALU_CONST_A_10
0x4CB0 US_ALU_CONST_R_11
0x4CB4 US_ALU_CONST_G_11
0x4CB8 US_ALU_CONST_B_11
0x4CBC US_ALU_CONST_A_11
0x4CC0 US_ALU_CONST_R_12
0x4CC4 US_ALU_CONST_G_12
0x4CC8 US_ALU_CONST_B_12
0x4CCC US_ALU_CONST_A_12
0x4CD0 US_ALU_CONST_R_13
0x4CD4 US_ALU_CONST_G_13
0x4CD8 US_ALU_CONST_B_13
0x4CDC US_ALU_CONST_A_13
0x4CE0 US_ALU_CONST_R_14
0x4CE4 US_ALU_CONST_G_14
0x4CE8 US_ALU_CONST_B_14
0x4CEC US_ALU_CONST_A_14
0x4CF0 US_ALU_CONST_R_15
0x4CF4 US_ALU_CONST_G_15
0x4CF8 US_ALU_CONST_B_15
0x4CFC US_ALU_CONST_A_15
0x4D00 US_ALU_CONST_R_16
0x4D04 US_ALU_CONST_G_16
0x4D08 US_ALU_CONST_B_16
0x4D0C US_ALU_CONST_A_16
0x4D10 US_ALU_CONST_R_17
0x4D14 US_ALU_CONST_G_17
0x4D18 US_ALU_CONST_B_17
0x4D1C US_ALU_CONST_A_17
0x4D20 US_ALU_CONST_R_18
0x4D24 US_ALU_CONST_G_18
0x4D28 US_ALU_CONST_B_18
0x4D2C US_ALU_CONST_A_18
0x4D30 US_ALU_CONST_R_19
0x4D34 US_ALU_CONST_G_19
0x4D38 US_ALU_CONST_B_19
0x4D3C US_ALU_CONST_A_19
0x4D40 US_ALU_CONST_R_20
0x4D44 US_ALU_CONST_G_20
0x4D48 US_ALU_CONST_B_20
0x4D4C US_ALU_CONST_A_20
0x4D50 US_ALU_CONST_R_21
0x4D54 US_ALU_CONST_G_21
0x4D58 US_ALU_CONST_B_21
0x4D5C US_ALU_CONST_A_21
0x4D60 US_ALU_CONST_R_22
0x4D64 US_ALU_CONST_G_22
0x4D68 US_ALU_CONST_B_22
0x4D6C US_ALU_CONST_A_22
0x4D70 US_ALU_CONST_R_23
0x4D74 US_ALU_CONST_G_23
0x4D78 US_ALU_CONST_B_23
0x4D7C US_ALU_CONST_A_23
0x4D80 US_ALU_CONST_R_24
0x4D84 US_ALU_CONST_G_24
0x4D88 US_ALU_CONST_B_24
0x4D8C US_ALU_CONST_A_24
0x4D90 US_ALU_CONST_R_25
0x4D94 US_ALU_CONST_G_25
0x4D98 US_ALU_CONST_B_25
0x4D9C US_ALU_CONST_A_25
0x4DA0 US_ALU_CONST_R_26
0x4DA4 US_ALU_CONST_G_26
0x4DA8 US_ALU_CONST_B_26
0x4DAC US_ALU_CONST_A_26
0x4DB0 US_ALU_CONST_R_27
0x4DB4 US_ALU_CONST_G_27
0x4DB8 US_ALU_CONST_B_27
0x4DBC US_ALU_CONST_A_27
0x4DC0 US_ALU_CONST_R_28
0x4DC4 US_ALU_CONST_G_28
0x4DC8 US_ALU_CONST_B_28
0x4DCC US_ALU_CONST_A_28
0x4DD0 US_ALU_CONST_R_29
0x4DD4 US_ALU_CONST_G_29
0x4DD8 US_ALU_CONST_B_29
0x4DDC US_ALU_CONST_A_29
0x4DE0 US_ALU_CONST_R_30
0x4DE4 US_ALU_CONST_G_30
0x4DE8 US_ALU_CONST_B_30
0x4DEC US_ALU_CONST_A_30
0x4DF0 US_ALU_CONST_R_31
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
0x4E08 RB3D_ABLENDCNTL_R3
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
0x4E1C RB3D_CLRCMP_FLIPE_R3
0x4E20 RB3D_CLRCMP_CLR_R3
0x4E24 RB3D_CLRCMP_MSK_R3
0x4E48 RB3D_DEBUG_CTL
0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
0x4E50 RB3D_DITHER_CTL
0x4E54 RB3D_CMASK_OFFSET0
0x4E58 RB3D_CMASK_OFFSET1
0x4E5C RB3D_CMASK_OFFSET2
0x4E60 RB3D_CMASK_OFFSET3
0x4E64 RB3D_CMASK_PITCH0
0x4E68 RB3D_CMASK_PITCH1
0x4E6C RB3D_CMASK_PITCH2
0x4E70 RB3D_CMASK_PITCH3
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
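
At submission time the checker consults the generated bitmap for every register touched by a packet-0 write: an offset beyond the table is rejected outright, a set bit routes the write to a chip-specific check callback (which applies relocations and rejects anything it does not recognize), and a cleared bit, i.e. a register listed in one of these files, passes through untouched. A hedged sketch of that dispatch, patterned on the Linux driver's r100_cs_parse_packet0()/r600_is_safe_reg() logic rather than quoted from this tree:

/* Sketch of the per-write dispatch against a <chip>_reg_safe_bm table;
 * bit polarity (set = needs inspection) matches the assumption made in
 * the parser sketch above. Not code from this revision. */
typedef int (*reg_check_t)(unsigned reg, unsigned idx);

static int check_packet0_reg(const unsigned *safe_bm, unsigned bm_words,
			     reg_check_t check, unsigned reg, unsigned idx)
{
	unsigned word = reg >> 7;	/* 128 bytes = 32 registers per word */
	unsigned mask = 1u << ((reg >> 2) & 31);

	if (word >= bm_words)
		return -1;		/* beyond the audited range: reject */
	if (safe_bm[word] & mask)
		return check(reg, idx);	/* unlisted register: inspect it */
	return 0;			/* listed register: write allowed */
}

With the r420 table above, a write to 0x4F58 (ZB_ZPASS_DATA) passes straight through, while 0x4F50, which is absent from the list, would be handed to the check callback.
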
/drivers/video/drm/radeon/reg_srcs/r600
0,0 → 1,755
r600 0x9400
0x000287A0 R7xx_CB_SHADER_CONTROL
0x00028230 R7xx_PA_SC_EDGERULE
0x000286C8 R7xx_SPI_THREAD_GROUPING
0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
0x00008490 CP_STRMOUT_CNTL
0x000085F0 CP_COHER_CNTL
0x000085F4 CP_COHER_SIZE
0x000088C4 VGT_CACHE_INVALIDATION
0x00028A50 VGT_ENHANCE
0x000088CC VGT_ES_PER_GS
0x00028A2C VGT_GROUP_DECR
0x00028A28 VGT_GROUP_FIRST_DECR
0x00028A24 VGT_GROUP_PRIM_TYPE
0x00028A30 VGT_GROUP_VECT_0_CNTL
0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
0x00028A34 VGT_GROUP_VECT_1_CNTL
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A6C VGT_GS_OUT_PRIM_TYPE
0x000088C8 VGT_GS_PER_ES
0x000088E8 VGT_GS_PER_VS
0x000088D4 VGT_GS_VERTEX_REUSE
0x00028A14 VGT_HOS_CNTL
0x00028A18 VGT_HOS_MAX_TESS_LEVEL
0x00028A1C VGT_HOS_MIN_TESS_LEVEL
0x00028A20 VGT_HOS_REUSE_DEPTH
0x0000895C VGT_INDEX_TYPE
0x00028408 VGT_INDX_OFFSET
0x00028AA0 VGT_INSTANCE_STEP_RATE_0
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
0x00028400 VGT_MAX_VTX_INDX
0x00028404 VGT_MIN_VTX_INDX
0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
0x00008970 VGT_NUM_INDICES
0x00008974 VGT_NUM_INSTANCES
0x00028A10 VGT_OUTPUT_PATH_CNTL
0x00028A84 VGT_PRIMITIVEID_EN
0x00008958 VGT_PRIMITIVE_TYPE
0x00028AB4 VGT_REUSE_OFF
0x00028AB8 VGT_VTX_CNT_EN
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x00028AD4 VGT_STRMOUT_VTX_STRIDE_0
0x00028AE4 VGT_STRMOUT_VTX_STRIDE_1
0x00028AF4 VGT_STRMOUT_VTX_STRIDE_2
0x00028B04 VGT_STRMOUT_VTX_STRIDE_3
0x00028B28 VGT_STRMOUT_DRAW_OPAQUE_OFFSET
0x00028B2C VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE
0x00028B30 VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE
0x00028810 PA_CL_CLIP_CNTL
0x00008A14 PA_CL_ENHANCE
0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
0x00028C10 PA_CL_GB_VERT_DISC_ADJ
0x00028820 PA_CL_NANINF_CNTL
0x00028E1C PA_CL_POINT_CULL_RAD
0x00028E18 PA_CL_POINT_SIZE
0x00028E10 PA_CL_POINT_X_RAD
0x00028E14 PA_CL_POINT_Y_RAD
0x00028E2C PA_CL_UCP_0_W
0x00028E3C PA_CL_UCP_1_W
0x00028E4C PA_CL_UCP_2_W
0x00028E5C PA_CL_UCP_3_W
0x00028E6C PA_CL_UCP_4_W
0x00028E7C PA_CL_UCP_5_W
0x00028E20 PA_CL_UCP_0_X
0x00028E30 PA_CL_UCP_1_X
0x00028E40 PA_CL_UCP_2_X
0x00028E50 PA_CL_UCP_3_X
0x00028E60 PA_CL_UCP_4_X
0x00028E70 PA_CL_UCP_5_X
0x00028E24 PA_CL_UCP_0_Y
0x00028E34 PA_CL_UCP_1_Y
0x00028E44 PA_CL_UCP_2_Y
0x00028E54 PA_CL_UCP_3_Y
0x00028E64 PA_CL_UCP_4_Y
0x00028E74 PA_CL_UCP_5_Y
0x00028E28 PA_CL_UCP_0_Z
0x00028E38 PA_CL_UCP_1_Z
0x00028E48 PA_CL_UCP_2_Z
0x00028E58 PA_CL_UCP_3_Z
0x00028E68 PA_CL_UCP_4_Z
0x00028E78 PA_CL_UCP_5_Z
0x00028440 PA_CL_VPORT_XOFFSET_0
0x00028458 PA_CL_VPORT_XOFFSET_1
0x00028470 PA_CL_VPORT_XOFFSET_2
0x00028488 PA_CL_VPORT_XOFFSET_3
0x000284A0 PA_CL_VPORT_XOFFSET_4
0x000284B8 PA_CL_VPORT_XOFFSET_5
0x000284D0 PA_CL_VPORT_XOFFSET_6
0x000284E8 PA_CL_VPORT_XOFFSET_7
0x00028500 PA_CL_VPORT_XOFFSET_8
0x00028518 PA_CL_VPORT_XOFFSET_9
0x00028530 PA_CL_VPORT_XOFFSET_10
0x00028548 PA_CL_VPORT_XOFFSET_11
0x00028560 PA_CL_VPORT_XOFFSET_12
0x00028578 PA_CL_VPORT_XOFFSET_13
0x00028590 PA_CL_VPORT_XOFFSET_14
0x000285A8 PA_CL_VPORT_XOFFSET_15
0x0002843C PA_CL_VPORT_XSCALE_0
0x00028454 PA_CL_VPORT_XSCALE_1
0x0002846C PA_CL_VPORT_XSCALE_2
0x00028484 PA_CL_VPORT_XSCALE_3
0x0002849C PA_CL_VPORT_XSCALE_4
0x000284B4 PA_CL_VPORT_XSCALE_5
0x000284CC PA_CL_VPORT_XSCALE_6
0x000284E4 PA_CL_VPORT_XSCALE_7
0x000284FC PA_CL_VPORT_XSCALE_8
0x00028514 PA_CL_VPORT_XSCALE_9
0x0002852C PA_CL_VPORT_XSCALE_10
0x00028544 PA_CL_VPORT_XSCALE_11
0x0002855C PA_CL_VPORT_XSCALE_12
0x00028574 PA_CL_VPORT_XSCALE_13
0x0002858C PA_CL_VPORT_XSCALE_14
0x000285A4 PA_CL_VPORT_XSCALE_15
0x00028448 PA_CL_VPORT_YOFFSET_0
0x00028460 PA_CL_VPORT_YOFFSET_1
0x00028478 PA_CL_VPORT_YOFFSET_2
0x00028490 PA_CL_VPORT_YOFFSET_3
0x000284A8 PA_CL_VPORT_YOFFSET_4
0x000284C0 PA_CL_VPORT_YOFFSET_5
0x000284D8 PA_CL_VPORT_YOFFSET_6
0x000284F0 PA_CL_VPORT_YOFFSET_7
0x00028508 PA_CL_VPORT_YOFFSET_8
0x00028520 PA_CL_VPORT_YOFFSET_9
0x00028538 PA_CL_VPORT_YOFFSET_10
0x00028550 PA_CL_VPORT_YOFFSET_11
0x00028568 PA_CL_VPORT_YOFFSET_12
0x00028580 PA_CL_VPORT_YOFFSET_13
0x00028598 PA_CL_VPORT_YOFFSET_14
0x000285B0 PA_CL_VPORT_YOFFSET_15
0x00028444 PA_CL_VPORT_YSCALE_0
0x0002845C PA_CL_VPORT_YSCALE_1
0x00028474 PA_CL_VPORT_YSCALE_2
0x0002848C PA_CL_VPORT_YSCALE_3
0x000284A4 PA_CL_VPORT_YSCALE_4
0x000284BC PA_CL_VPORT_YSCALE_5
0x000284D4 PA_CL_VPORT_YSCALE_6
0x000284EC PA_CL_VPORT_YSCALE_7
0x00028504 PA_CL_VPORT_YSCALE_8
0x0002851C PA_CL_VPORT_YSCALE_9
0x00028534 PA_CL_VPORT_YSCALE_10
0x0002854C PA_CL_VPORT_YSCALE_11
0x00028564 PA_CL_VPORT_YSCALE_12
0x0002857C PA_CL_VPORT_YSCALE_13
0x00028594 PA_CL_VPORT_YSCALE_14
0x000285AC PA_CL_VPORT_YSCALE_15
0x00028450 PA_CL_VPORT_ZOFFSET_0
0x00028468 PA_CL_VPORT_ZOFFSET_1
0x00028480 PA_CL_VPORT_ZOFFSET_2
0x00028498 PA_CL_VPORT_ZOFFSET_3
0x000284B0 PA_CL_VPORT_ZOFFSET_4
0x000284C8 PA_CL_VPORT_ZOFFSET_5
0x000284E0 PA_CL_VPORT_ZOFFSET_6
0x000284F8 PA_CL_VPORT_ZOFFSET_7
0x00028510 PA_CL_VPORT_ZOFFSET_8
0x00028528 PA_CL_VPORT_ZOFFSET_9
0x00028540 PA_CL_VPORT_ZOFFSET_10
0x00028558 PA_CL_VPORT_ZOFFSET_11
0x00028570 PA_CL_VPORT_ZOFFSET_12
0x00028588 PA_CL_VPORT_ZOFFSET_13
0x000285A0 PA_CL_VPORT_ZOFFSET_14
0x000285B8 PA_CL_VPORT_ZOFFSET_15
0x0002844C PA_CL_VPORT_ZSCALE_0
0x00028464 PA_CL_VPORT_ZSCALE_1
0x0002847C PA_CL_VPORT_ZSCALE_2
0x00028494 PA_CL_VPORT_ZSCALE_3
0x000284AC PA_CL_VPORT_ZSCALE_4
0x000284C4 PA_CL_VPORT_ZSCALE_5
0x000284DC PA_CL_VPORT_ZSCALE_6
0x000284F4 PA_CL_VPORT_ZSCALE_7
0x0002850C PA_CL_VPORT_ZSCALE_8
0x00028524 PA_CL_VPORT_ZSCALE_9
0x0002853C PA_CL_VPORT_ZSCALE_10
0x00028554 PA_CL_VPORT_ZSCALE_11
0x0002856C PA_CL_VPORT_ZSCALE_12
0x00028584 PA_CL_VPORT_ZSCALE_13
0x0002859C PA_CL_VPORT_ZSCALE_14
0x000285B4 PA_CL_VPORT_ZSCALE_15
0x0002881C PA_CL_VS_OUT_CNTL
0x00028818 PA_CL_VTE_CNTL
0x00028C48 PA_SC_AA_MASK
0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
0x00028214 PA_SC_CLIPRECT_0_BR
0x0002821C PA_SC_CLIPRECT_1_BR
0x00028224 PA_SC_CLIPRECT_2_BR
0x0002822C PA_SC_CLIPRECT_3_BR
0x00028210 PA_SC_CLIPRECT_0_TL
0x00028218 PA_SC_CLIPRECT_1_TL
0x00028220 PA_SC_CLIPRECT_2_TL
0x00028228 PA_SC_CLIPRECT_3_TL
0x0002820C PA_SC_CLIPRECT_RULE
0x00008BF0 PA_SC_ENHANCE
0x00028244 PA_SC_GENERIC_SCISSOR_BR
0x00028240 PA_SC_GENERIC_SCISSOR_TL
0x00028C00 PA_SC_LINE_CNTL
0x00028A0C PA_SC_LINE_STIPPLE
0x00008B10 PA_SC_LINE_STIPPLE_STATE
0x00028A4C PA_SC_MODE_CNTL
0x00028A48 PA_SC_MPASS_PS_CNTL
0x00008B20 PA_SC_MULTI_CHIP_CNTL
0x00028034 PA_SC_SCREEN_SCISSOR_BR
0x00028030 PA_SC_SCREEN_SCISSOR_TL
0x00028254 PA_SC_VPORT_SCISSOR_0_BR
0x0002825C PA_SC_VPORT_SCISSOR_1_BR
0x00028264 PA_SC_VPORT_SCISSOR_2_BR
0x0002826C PA_SC_VPORT_SCISSOR_3_BR
0x00028274 PA_SC_VPORT_SCISSOR_4_BR
0x0002827C PA_SC_VPORT_SCISSOR_5_BR
0x00028284 PA_SC_VPORT_SCISSOR_6_BR
0x0002828C PA_SC_VPORT_SCISSOR_7_BR
0x00028294 PA_SC_VPORT_SCISSOR_8_BR
0x0002829C PA_SC_VPORT_SCISSOR_9_BR
0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
0x000282AC PA_SC_VPORT_SCISSOR_11_BR
0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
0x000282BC PA_SC_VPORT_SCISSOR_13_BR
0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
0x000282CC PA_SC_VPORT_SCISSOR_15_BR
0x00028250 PA_SC_VPORT_SCISSOR_0_TL
0x00028258 PA_SC_VPORT_SCISSOR_1_TL
0x00028260 PA_SC_VPORT_SCISSOR_2_TL
0x00028268 PA_SC_VPORT_SCISSOR_3_TL
0x00028270 PA_SC_VPORT_SCISSOR_4_TL
0x00028278 PA_SC_VPORT_SCISSOR_5_TL
0x00028280 PA_SC_VPORT_SCISSOR_6_TL
0x00028288 PA_SC_VPORT_SCISSOR_7_TL
0x00028290 PA_SC_VPORT_SCISSOR_8_TL
0x00028298 PA_SC_VPORT_SCISSOR_9_TL
0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
0x000282D4 PA_SC_VPORT_ZMAX_0
0x000282DC PA_SC_VPORT_ZMAX_1
0x000282E4 PA_SC_VPORT_ZMAX_2
0x000282EC PA_SC_VPORT_ZMAX_3
0x000282F4 PA_SC_VPORT_ZMAX_4
0x000282FC PA_SC_VPORT_ZMAX_5
0x00028304 PA_SC_VPORT_ZMAX_6
0x0002830C PA_SC_VPORT_ZMAX_7
0x00028314 PA_SC_VPORT_ZMAX_8
0x0002831C PA_SC_VPORT_ZMAX_9
0x00028324 PA_SC_VPORT_ZMAX_10
0x0002832C PA_SC_VPORT_ZMAX_11
0x00028334 PA_SC_VPORT_ZMAX_12
0x0002833C PA_SC_VPORT_ZMAX_13
0x00028344 PA_SC_VPORT_ZMAX_14
0x0002834C PA_SC_VPORT_ZMAX_15
0x000282D0 PA_SC_VPORT_ZMIN_0
0x000282D8 PA_SC_VPORT_ZMIN_1
0x000282E0 PA_SC_VPORT_ZMIN_2
0x000282E8 PA_SC_VPORT_ZMIN_3
0x000282F0 PA_SC_VPORT_ZMIN_4
0x000282F8 PA_SC_VPORT_ZMIN_5
0x00028300 PA_SC_VPORT_ZMIN_6
0x00028308 PA_SC_VPORT_ZMIN_7
0x00028310 PA_SC_VPORT_ZMIN_8
0x00028318 PA_SC_VPORT_ZMIN_9
0x00028320 PA_SC_VPORT_ZMIN_10
0x00028328 PA_SC_VPORT_ZMIN_11
0x00028330 PA_SC_VPORT_ZMIN_12
0x00028338 PA_SC_VPORT_ZMIN_13
0x00028340 PA_SC_VPORT_ZMIN_14
0x00028348 PA_SC_VPORT_ZMIN_15
0x00028200 PA_SC_WINDOW_OFFSET
0x00028208 PA_SC_WINDOW_SCISSOR_BR
0x00028204 PA_SC_WINDOW_SCISSOR_TL
0x00028A08 PA_SU_LINE_CNTL
0x00028A04 PA_SU_POINT_MINMAX
0x00028A00 PA_SU_POINT_SIZE
0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
0x00028DFC PA_SU_POLY_OFFSET_CLAMP
0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
0x00028814 PA_SU_SC_MODE_CNTL
0x00028C08 PA_SU_VTX_CNTL
0x00008C04 SQ_GPR_RESOURCE_MGMT_1
0x00008C08 SQ_GPR_RESOURCE_MGMT_2
0x00008C10 SQ_STACK_RESOURCE_MGMT_1
0x00008C14 SQ_STACK_RESOURCE_MGMT_2
0x00008C0C SQ_THREAD_RESOURCE_MGMT
0x00028380 SQ_VTX_SEMANTIC_0
0x00028384 SQ_VTX_SEMANTIC_1
0x00028388 SQ_VTX_SEMANTIC_2
0x0002838C SQ_VTX_SEMANTIC_3
0x00028390 SQ_VTX_SEMANTIC_4
0x00028394 SQ_VTX_SEMANTIC_5
0x00028398 SQ_VTX_SEMANTIC_6
0x0002839C SQ_VTX_SEMANTIC_7
0x000283A0 SQ_VTX_SEMANTIC_8
0x000283A4 SQ_VTX_SEMANTIC_9
0x000283A8 SQ_VTX_SEMANTIC_10
0x000283AC SQ_VTX_SEMANTIC_11
0x000283B0 SQ_VTX_SEMANTIC_12
0x000283B4 SQ_VTX_SEMANTIC_13
0x000283B8 SQ_VTX_SEMANTIC_14
0x000283BC SQ_VTX_SEMANTIC_15
0x000283C0 SQ_VTX_SEMANTIC_16
0x000283C4 SQ_VTX_SEMANTIC_17
0x000283C8 SQ_VTX_SEMANTIC_18
0x000283CC SQ_VTX_SEMANTIC_19
0x000283D0 SQ_VTX_SEMANTIC_20
0x000283D4 SQ_VTX_SEMANTIC_21
0x000283D8 SQ_VTX_SEMANTIC_22
0x000283DC SQ_VTX_SEMANTIC_23
0x000283E0 SQ_VTX_SEMANTIC_24
0x000283E4 SQ_VTX_SEMANTIC_25
0x000283E8 SQ_VTX_SEMANTIC_26
0x000283EC SQ_VTX_SEMANTIC_27
0x000283F0 SQ_VTX_SEMANTIC_28
0x000283F4 SQ_VTX_SEMANTIC_29
0x000283F8 SQ_VTX_SEMANTIC_30
0x000283FC SQ_VTX_SEMANTIC_31
0x000288E0 SQ_VTX_SEMANTIC_CLEAR
0x0003CFF4 SQ_VTX_START_INST_LOC
0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
0x000288D8 SQ_PGM_CF_OFFSET_ES
0x000288DC SQ_PGM_CF_OFFSET_FS
0x000288D4 SQ_PGM_CF_OFFSET_GS
0x000288CC SQ_PGM_CF_OFFSET_PS
0x000288D0 SQ_PGM_CF_OFFSET_VS
0x00028854 SQ_PGM_EXPORTS_PS
0x00028890 SQ_PGM_RESOURCES_ES
0x000288A4 SQ_PGM_RESOURCES_FS
0x0002887C SQ_PGM_RESOURCES_GS
0x00028850 SQ_PGM_RESOURCES_PS
0x00028868 SQ_PGM_RESOURCES_VS
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x000286DC SPI_FOG_CNTL
0x000286E4 SPI_FOG_FUNC_BIAS
0x000286E0 SPI_FOG_FUNC_SCALE
0x000286D8 SPI_INPUT_Z
0x000286D4 SPI_INTERP_CONTROL_0
0x00028644 SPI_PS_INPUT_CNTL_0
0x00028648 SPI_PS_INPUT_CNTL_1
0x0002864C SPI_PS_INPUT_CNTL_2
0x00028650 SPI_PS_INPUT_CNTL_3
0x00028654 SPI_PS_INPUT_CNTL_4
0x00028658 SPI_PS_INPUT_CNTL_5
0x0002865C SPI_PS_INPUT_CNTL_6
0x00028660 SPI_PS_INPUT_CNTL_7
0x00028664 SPI_PS_INPUT_CNTL_8
0x00028668 SPI_PS_INPUT_CNTL_9
0x0002866C SPI_PS_INPUT_CNTL_10
0x00028670 SPI_PS_INPUT_CNTL_11
0x00028674 SPI_PS_INPUT_CNTL_12
0x00028678 SPI_PS_INPUT_CNTL_13
0x0002867C SPI_PS_INPUT_CNTL_14
0x00028680 SPI_PS_INPUT_CNTL_15
0x00028684 SPI_PS_INPUT_CNTL_16
0x00028688 SPI_PS_INPUT_CNTL_17
0x0002868C SPI_PS_INPUT_CNTL_18
0x00028690 SPI_PS_INPUT_CNTL_19
0x00028694 SPI_PS_INPUT_CNTL_20
0x00028698 SPI_PS_INPUT_CNTL_21
0x0002869C SPI_PS_INPUT_CNTL_22
0x000286A0 SPI_PS_INPUT_CNTL_23
0x000286A4 SPI_PS_INPUT_CNTL_24
0x000286A8 SPI_PS_INPUT_CNTL_25
0x000286AC SPI_PS_INPUT_CNTL_26
0x000286B0 SPI_PS_INPUT_CNTL_27
0x000286B4 SPI_PS_INPUT_CNTL_28
0x000286B8 SPI_PS_INPUT_CNTL_29
0x000286BC SPI_PS_INPUT_CNTL_30
0x000286C0 SPI_PS_INPUT_CNTL_31
0x000286CC SPI_PS_IN_CONTROL_0
0x000286D0 SPI_PS_IN_CONTROL_1
0x000286C4 SPI_VS_OUT_CONFIG
0x00028614 SPI_VS_OUT_ID_0
0x00028618 SPI_VS_OUT_ID_1
0x0002861C SPI_VS_OUT_ID_2
0x00028620 SPI_VS_OUT_ID_3
0x00028624 SPI_VS_OUT_ID_4
0x00028628 SPI_VS_OUT_ID_5
0x0002862C SPI_VS_OUT_ID_6
0x00028630 SPI_VS_OUT_ID_7
0x00028634 SPI_VS_OUT_ID_8
0x00028638 SPI_VS_OUT_ID_9
0x00028438 SX_ALPHA_REF
0x00028410 SX_ALPHA_TEST_CONTROL
0x00028354 SX_SURFACE_SYNC
0x00009014 SX_MEMORY_EXPORT_SIZE
0x00009604 TC_INVALIDATE
0x00009400 TD_FILTER4
0x00009404 TD_FILTER4_1
0x00009408 TD_FILTER4_2
0x0000940C TD_FILTER4_3
0x00009410 TD_FILTER4_4
0x00009414 TD_FILTER4_5
0x00009418 TD_FILTER4_6
0x0000941C TD_FILTER4_7
0x00009420 TD_FILTER4_8
0x00009424 TD_FILTER4_9
0x00009428 TD_FILTER4_10
0x0000942C TD_FILTER4_11
0x00009430 TD_FILTER4_12
0x00009434 TD_FILTER4_13
0x00009438 TD_FILTER4_14
0x0000943C TD_FILTER4_15
0x00009440 TD_FILTER4_16
0x00009444 TD_FILTER4_17
0x00009448 TD_FILTER4_18
0x0000944C TD_FILTER4_19
0x00009450 TD_FILTER4_20
0x00009454 TD_FILTER4_21
0x00009458 TD_FILTER4_22
0x0000945C TD_FILTER4_23
0x00009460 TD_FILTER4_24
0x00009464 TD_FILTER4_25
0x00009468 TD_FILTER4_26
0x0000946C TD_FILTER4_27
0x00009470 TD_FILTER4_28
0x00009474 TD_FILTER4_29
0x00009478 TD_FILTER4_30
0x0000947C TD_FILTER4_31
0x00009480 TD_FILTER4_32
0x00009484 TD_FILTER4_33
0x00009488 TD_FILTER4_34
0x0000948C TD_FILTER4_35
0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
0x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
0x0000A800 TD_GS_SAMPLER0_BORDER_RED
0x0000A810 TD_GS_SAMPLER1_BORDER_RED
0x0000A820 TD_GS_SAMPLER2_BORDER_RED
0x0000A830 TD_GS_SAMPLER3_BORDER_RED
0x0000A840 TD_GS_SAMPLER4_BORDER_RED
0x0000A850 TD_GS_SAMPLER5_BORDER_RED
0x0000A860 TD_GS_SAMPLER6_BORDER_RED
0x0000A870 TD_GS_SAMPLER7_BORDER_RED
0x0000A880 TD_GS_SAMPLER8_BORDER_RED
0x0000A890 TD_GS_SAMPLER9_BORDER_RED
0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
0x0000A900 TD_GS_SAMPLER16_BORDER_RED
0x0000A910 TD_GS_SAMPLER17_BORDER_RED
0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
0x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
0x0000A400 TD_PS_SAMPLER0_BORDER_RED
0x0000A410 TD_PS_SAMPLER1_BORDER_RED
0x0000A420 TD_PS_SAMPLER2_BORDER_RED
0x0000A430 TD_PS_SAMPLER3_BORDER_RED
0x0000A440 TD_PS_SAMPLER4_BORDER_RED
0x0000A450 TD_PS_SAMPLER5_BORDER_RED
0x0000A460 TD_PS_SAMPLER6_BORDER_RED
0x0000A470 TD_PS_SAMPLER7_BORDER_RED
0x0000A480 TD_PS_SAMPLER8_BORDER_RED
0x0000A490 TD_PS_SAMPLER9_BORDER_RED
0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
0x0000A500 TD_PS_SAMPLER16_BORDER_RED
0x0000A510 TD_PS_SAMPLER17_BORDER_RED
0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
0x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
0x0000A600 TD_VS_SAMPLER0_BORDER_RED
0x0000A610 TD_VS_SAMPLER1_BORDER_RED
0x0000A620 TD_VS_SAMPLER2_BORDER_RED
0x0000A630 TD_VS_SAMPLER3_BORDER_RED
0x0000A640 TD_VS_SAMPLER4_BORDER_RED
0x0000A650 TD_VS_SAMPLER5_BORDER_RED
0x0000A660 TD_VS_SAMPLER6_BORDER_RED
0x0000A670 TD_VS_SAMPLER7_BORDER_RED
0x0000A680 TD_VS_SAMPLER8_BORDER_RED
0x0000A690 TD_VS_SAMPLER9_BORDER_RED
0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
0x0000A700 TD_VS_SAMPLER16_BORDER_RED
0x0000A710 TD_VS_SAMPLER17_BORDER_RED
0x00009508 TA_CNTL_AUX
0x0002802C DB_DEPTH_CLEAR
0x00028D34 DB_PREFETCH_LIMIT
0x00028D30 DB_PRELOAD_CONTROL
0x00028D0C DB_RENDER_CONTROL
0x00028D10 DB_RENDER_OVERRIDE
0x0002880C DB_SHADER_CONTROL
0x00028D28 DB_SRESULTS_COMPARE_STATE0
0x00028D2C DB_SRESULTS_COMPARE_STATE1
0x00028430 DB_STENCILREFMASK
0x00028434 DB_STENCILREFMASK_BF
0x00028028 DB_STENCIL_CLEAR
0x00028780 CB_BLEND0_CONTROL
0x00028784 CB_BLEND1_CONTROL
0x00028788 CB_BLEND2_CONTROL
0x0002878C CB_BLEND3_CONTROL
0x00028790 CB_BLEND4_CONTROL
0x00028794 CB_BLEND5_CONTROL
0x00028798 CB_BLEND6_CONTROL
0x0002879C CB_BLEND7_CONTROL
0x00028804 CB_BLEND_CONTROL
0x00028420 CB_BLEND_ALPHA
0x0002841C CB_BLEND_BLUE
0x00028418 CB_BLEND_GREEN
0x00028414 CB_BLEND_RED
0x0002812C CB_CLEAR_ALPHA
0x00028128 CB_CLEAR_BLUE
0x00028124 CB_CLEAR_GREEN
0x00028120 CB_CLEAR_RED
0x00028C30 CB_CLRCMP_CONTROL
0x00028C38 CB_CLRCMP_DST
0x00028C3C CB_CLRCMP_MSK
0x00028C34 CB_CLRCMP_SRC
0x0002842C CB_FOG_BLUE
0x00028428 CB_FOG_GREEN
0x00028424 CB_FOG_RED
0x00008040 WAIT_UNTIL
0x00009714 VC_ENHANCE
0x00009830 DB_DEBUG
0x00009838 DB_WATERMARKS
0x00028D44 DB_ALPHA_TO_MASK
0x00009700 VC_CNTL
/drivers/video/drm/radeon/reg_srcs/rn50
0,0 → 1,30
rn50 0x3294
0x1434 SRC_Y_X
0x1438 DST_Y_X
0x143C DST_HEIGHT_WIDTH
0x146C DP_GUI_MASTER_CNTL
0x1474 BRUSH_Y_X
0x1478 DP_BRUSH_BKGD_CLR
0x147C DP_BRUSH_FRGD_CLR
0x1480 BRUSH_DATA0
0x1484 BRUSH_DATA1
0x1598 DST_WIDTH_HEIGHT
0x15C0 CLR_CMP_CNTL
0x15C4 CLR_CMP_CLR_SRC
0x15C8 CLR_CMP_CLR_DST
0x15CC CLR_CMP_MSK
0x15D8 DP_SRC_FRGD_CLR
0x15DC DP_SRC_BKGD_CLR
0x1600 DST_LINE_START
0x1604 DST_LINE_END
0x1608 DST_LINE_PATCOUNT
0x16C0 DP_CNTL
0x16CC DP_WRITE_MSK
0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
0x16E8 DEFAULT_SC_BOTTOM_RIGHT
0x16EC SC_TOP_LEFT
0x16F0 SC_BOTTOM_RIGHT
0x16F4 SRC_SC_BOTTOM_RIGHT
0x1714 DSTCACHE_CTLSTAT
0x1720 WAIT_UNTIL
0x172C RBBM_GUICNTL
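
The rn50 list just above is the smallest of the four tables in this revision: 30 entries, all classic 2D blitter state, against a header of 0x3294, so almost every bit in its bitmap stays set. A quick sizing check across all four headers, using the formula from the parser sketch earlier (the table spans the larger of the header value and the highest listed offset; the word counts are plain arithmetic, not generated output):

/* Worked bitmap sizing for the four reg_srcs tables in this revision:
 * words = (span / 4) / 32 + 1, span = max(header, highest listed offset). */
#define SAFE_BM_WORDS(span)	(((span) / 4) / 32 + 1)

enum {
	RN50_WORDS  = SAFE_BM_WORDS(0x3294),	/* header dominates: 102 words */
	R420_WORDS  = SAFE_BM_WORDS(0x4f60),	/* header dominates: 159 words */
	RS600_WORDS = SAFE_BM_WORDS(0x6d40),	/* header dominates: 219 words */
	R600_WORDS  = SAFE_BM_WORDS(0x3cff4),	/* SQ_VTX_START_INST_LOC tops the
						 * r600 list, past its 0x9400
						 * header: 1952 words */
};
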
/drivers/video/drm/radeon/reg_srcs/rs600
0,0 → 1,780
rs600 0x6d40
0x1434 SRC_Y_X
0x1438 DST_Y_X
0x143C DST_HEIGHT_WIDTH
0x146C DP_GUI_MASTER_CNTL
0x1474 BRUSH_Y_X
0x1478 DP_BRUSH_BKGD_CLR
0x147C DP_BRUSH_FRGD_CLR
0x1480 BRUSH_DATA0
0x1484 BRUSH_DATA1
0x1598 DST_WIDTH_HEIGHT
0x15C0 CLR_CMP_CNTL
0x15C4 CLR_CMP_CLR_SRC
0x15C8 CLR_CMP_CLR_DST
0x15CC CLR_CMP_MSK
0x15D8 DP_SRC_FRGD_CLR
0x15DC DP_SRC_BKGD_CLR
0x1600 DST_LINE_START
0x1604 DST_LINE_END
0x1608 DST_LINE_PATCOUNT
0x16C0 DP_CNTL
0x16CC DP_WRITE_MSK
0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
0x16E8 DEFAULT_SC_BOTTOM_RIGHT
0x16EC SC_TOP_LEFT
0x16F0 SC_BOTTOM_RIGHT
0x16F4 SRC_SC_BOTTOM_RIGHT
0x1714 DSTCACHE_CTLSTAT
0x1720 WAIT_UNTIL
0x172C RBBM_GUICNTL
0x1D98 VAP_VPORT_XSCALE
0x1D9C VAP_VPORT_XOFFSET
0x1DA0 VAP_VPORT_YSCALE
0x1DA4 VAP_VPORT_YOFFSET
0x1DA8 VAP_VPORT_ZSCALE
0x1DAC VAP_VPORT_ZOFFSET
0x2080 VAP_CNTL
0x2090 VAP_OUT_VTX_FMT_0
0x2094 VAP_OUT_VTX_FMT_1
0x20B0 VAP_VTE_CNTL
0x2138 VAP_VF_MIN_VTX_INDX
0x2140 VAP_CNTL_STATUS
0x2150 VAP_PROG_STREAM_CNTL_0
0x2154 VAP_PROG_STREAM_CNTL_1
0x2158 VAP_PROG_STREAM_CNTL_2
0x215C VAP_PROG_STREAM_CNTL_3
0x2160 VAP_PROG_STREAM_CNTL_4
0x2164 VAP_PROG_STREAM_CNTL_5
0x2168 VAP_PROG_STREAM_CNTL_6
0x216C VAP_PROG_STREAM_CNTL_7
0x2180 VAP_VTX_STATE_CNTL
0x2184 VAP_VSM_VTX_ASSM
0x2188 VAP_VTX_STATE_IND_REG_0
0x218C VAP_VTX_STATE_IND_REG_1
0x2190 VAP_VTX_STATE_IND_REG_2
0x2194 VAP_VTX_STATE_IND_REG_3
0x2198 VAP_VTX_STATE_IND_REG_4
0x219C VAP_VTX_STATE_IND_REG_5
0x21A0 VAP_VTX_STATE_IND_REG_6
0x21A4 VAP_VTX_STATE_IND_REG_7
0x21A8 VAP_VTX_STATE_IND_REG_8
0x21AC VAP_VTX_STATE_IND_REG_9
0x21B0 VAP_VTX_STATE_IND_REG_10
0x21B4 VAP_VTX_STATE_IND_REG_11
0x21B8 VAP_VTX_STATE_IND_REG_12
0x21BC VAP_VTX_STATE_IND_REG_13
0x21C0 VAP_VTX_STATE_IND_REG_14
0x21C4 VAP_VTX_STATE_IND_REG_15
0x21DC VAP_PSC_SGN_NORM_CNTL
0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
0x21EC VAP_PROG_STREAM_CNTL_EXT_3
0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
0x21FC VAP_PROG_STREAM_CNTL_EXT_7
0x2200 VAP_PVS_VECTOR_INDX_REG
0x2204 VAP_PVS_VECTOR_DATA_REG
0x2208 VAP_PVS_VECTOR_DATA_REG_128
0x221C VAP_CLIP_CNTL
0x2220 VAP_GB_VERT_CLIP_ADJ
0x2224 VAP_GB_VERT_DISC_ADJ
0x2228 VAP_GB_HORZ_CLIP_ADJ
0x222C VAP_GB_HORZ_DISC_ADJ
0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
0x2284 VAP_PVS_STATE_FLUSH_REG
0x2288 VAP_PVS_VTX_TIMEOUT_REG
0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
0x22D0 VAP_PVS_CODE_CNTL_0
0x22D4 VAP_PVS_CONST_CNTL
0x22D8 VAP_PVS_CODE_CNTL_1
0x22DC VAP_PVS_FLOW_CNTL_OPC
0x342C RB2D_DSTCACHE_CTLSTAT
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
0x4010 GB_MSPOS0
0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
0x4100 TX_INVALTAGS
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
0x4208 GA_POINT_S1
0x420C GA_POINT_T1
0x4214 GA_TRIANGLE_STIPPLE
0x421C GA_POINT_SIZE
0x4230 GA_POINT_MINMAX
0x4234 GA_LINE_CNTL
0x4238 GA_LINE_STIPPLE_CONFIG
0x4260 GA_LINE_STIPPLE_VALUE
0x4264 GA_LINE_S0
0x4268 GA_LINE_S1
0x4278 GA_COLOR_CONTROL
0x427C GA_SOLID_RG
0x4280 GA_SOLID_BA
0x4288 GA_POLY_MODE
0x428C GA_ROUND_MODE
0x4290 GA_OFFSET
0x4294 GA_FOG_SCALE
0x4298 GA_FOG_OFFSET
0x42A0 SU_TEX_WRAP
0x42A4 SU_POLY_OFFSET_FRONT_SCALE
0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
0x42AC SU_POLY_OFFSET_BACK_SCALE
0x42B0 SU_POLY_OFFSET_BACK_OFFSET
0x42B4 SU_POLY_OFFSET_ENABLE
0x42B8 SU_CULL_MODE
0x42C0 SU_DEPTH_SCALE
0x42C4 SU_DEPTH_OFFSET
0x42C8 SU_REG_DEST
0x4300 RS_COUNT
0x4304 RS_INST_COUNT
0x4310 RS_IP_0
0x4314 RS_IP_1
0x4318 RS_IP_2
0x431C RS_IP_3
0x4320 RS_IP_4
0x4324 RS_IP_5
0x4328 RS_IP_6
0x432C RS_IP_7
0x4330 RS_INST_0
0x4334 RS_INST_1
0x4338 RS_INST_2
0x433C RS_INST_3
0x4340 RS_INST_4
0x4344 RS_INST_5
0x4348 RS_INST_6
0x434C RS_INST_7
0x4350 RS_INST_8
0x4354 RS_INST_9
0x4358 RS_INST_10
0x435C RS_INST_11
0x4360 RS_INST_12
0x4364 RS_INST_13
0x4368 RS_INST_14
0x436C RS_INST_15
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
0x43B8 SC_CLIP_1_A
0x43BC SC_CLIP_1_B
0x43C0 SC_CLIP_2_A
0x43C4 SC_CLIP_2_B
0x43C8 SC_CLIP_3_A
0x43CC SC_CLIP_3_B
0x43D0 SC_CLIP_RULE
0x43E0 SC_SCISSOR0
0x43E8 SC_SCREENDOOR
0x4440 TX_FILTER1_0
0x4444 TX_FILTER1_1
0x4448 TX_FILTER1_2
0x444C TX_FILTER1_3
0x4450 TX_FILTER1_4
0x4454 TX_FILTER1_5
0x4458 TX_FILTER1_6
0x445C TX_FILTER1_7
0x4460 TX_FILTER1_8
0x4464 TX_FILTER1_9
0x4468 TX_FILTER1_10
0x446C TX_FILTER1_11
0x4470 TX_FILTER1_12
0x4474 TX_FILTER1_13
0x4478 TX_FILTER1_14
0x447C TX_FILTER1_15
0x4580 TX_CHROMA_KEY_0
0x4584 TX_CHROMA_KEY_1
0x4588 TX_CHROMA_KEY_2
0x458C TX_CHROMA_KEY_3
0x4590 TX_CHROMA_KEY_4
0x4594 TX_CHROMA_KEY_5
0x4598 TX_CHROMA_KEY_6
0x459C TX_CHROMA_KEY_7
0x45A0 TX_CHROMA_KEY_8
0x45A4 TX_CHROMA_KEY_9
0x45A8 TX_CHROMA_KEY_10
0x45AC TX_CHROMA_KEY_11
0x45B0 TX_CHROMA_KEY_12
0x45B4 TX_CHROMA_KEY_13
0x45B8 TX_CHROMA_KEY_14
0x45BC TX_CHROMA_KEY_15
0x45C0 TX_BORDER_COLOR_0
0x45C4 TX_BORDER_COLOR_1
0x45C8 TX_BORDER_COLOR_2
0x45CC TX_BORDER_COLOR_3
0x45D0 TX_BORDER_COLOR_4
0x45D4 TX_BORDER_COLOR_5
0x45D8 TX_BORDER_COLOR_6
0x45DC TX_BORDER_COLOR_7
0x45E0 TX_BORDER_COLOR_8
0x45E4 TX_BORDER_COLOR_9
0x45E8 TX_BORDER_COLOR_10
0x45EC TX_BORDER_COLOR_11
0x45F0 TX_BORDER_COLOR_12
0x45F4 TX_BORDER_COLOR_13
0x45F8 TX_BORDER_COLOR_14
0x45FC TX_BORDER_COLOR_15
0x4600 US_CONFIG
0x4604 US_PIXSIZE
0x4608 US_CODE_OFFSET
0x460C US_RESET
0x4610 US_CODE_ADDR_0
0x4614 US_CODE_ADDR_1
0x4618 US_CODE_ADDR_2
0x461C US_CODE_ADDR_3
0x4620 US_TEX_INST_0
0x4624 US_TEX_INST_1
0x4628 US_TEX_INST_2
0x462C US_TEX_INST_3
0x4630 US_TEX_INST_4
0x4634 US_TEX_INST_5
0x4638 US_TEX_INST_6
0x463C US_TEX_INST_7
0x4640 US_TEX_INST_8
0x4644 US_TEX_INST_9
0x4648 US_TEX_INST_10
0x464C US_TEX_INST_11
0x4650 US_TEX_INST_12
0x4654 US_TEX_INST_13
0x4658 US_TEX_INST_14
0x465C US_TEX_INST_15
0x4660 US_TEX_INST_16
0x4664 US_TEX_INST_17
0x4668 US_TEX_INST_18
0x466C US_TEX_INST_19
0x4670 US_TEX_INST_20
0x4674 US_TEX_INST_21
0x4678 US_TEX_INST_22
0x467C US_TEX_INST_23
0x4680 US_TEX_INST_24
0x4684 US_TEX_INST_25
0x4688 US_TEX_INST_26
0x468C US_TEX_INST_27
0x4690 US_TEX_INST_28
0x4694 US_TEX_INST_29
0x4698 US_TEX_INST_30
0x469C US_TEX_INST_31
0x46A4 US_OUT_FMT_0
0x46A8 US_OUT_FMT_1
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
0x46B8 US_CODE_BANK
0x46BC US_CODE_EXT
0x46C0 US_ALU_RGB_ADDR_0
0x46C4 US_ALU_RGB_ADDR_1
0x46C8 US_ALU_RGB_ADDR_2
0x46CC US_ALU_RGB_ADDR_3
0x46D0 US_ALU_RGB_ADDR_4
0x46D4 US_ALU_RGB_ADDR_5
0x46D8 US_ALU_RGB_ADDR_6
0x46DC US_ALU_RGB_ADDR_7
0x46E0 US_ALU_RGB_ADDR_8
0x46E4 US_ALU_RGB_ADDR_9
0x46E8 US_ALU_RGB_ADDR_10
0x46EC US_ALU_RGB_ADDR_11
0x46F0 US_ALU_RGB_ADDR_12
0x46F4 US_ALU_RGB_ADDR_13
0x46F8 US_ALU_RGB_ADDR_14
0x46FC US_ALU_RGB_ADDR_15
0x4700 US_ALU_RGB_ADDR_16
0x4704 US_ALU_RGB_ADDR_17
0x4708 US_ALU_RGB_ADDR_18
0x470C US_ALU_RGB_ADDR_19
0x4710 US_ALU_RGB_ADDR_20
0x4714 US_ALU_RGB_ADDR_21
0x4718 US_ALU_RGB_ADDR_22
0x471C US_ALU_RGB_ADDR_23
0x4720 US_ALU_RGB_ADDR_24
0x4724 US_ALU_RGB_ADDR_25
0x4728 US_ALU_RGB_ADDR_26
0x472C US_ALU_RGB_ADDR_27
0x4730 US_ALU_RGB_ADDR_28
0x4734 US_ALU_RGB_ADDR_29
0x4738 US_ALU_RGB_ADDR_30
0x473C US_ALU_RGB_ADDR_31
0x4740 US_ALU_RGB_ADDR_32
0x4744 US_ALU_RGB_ADDR_33
0x4748 US_ALU_RGB_ADDR_34
0x474C US_ALU_RGB_ADDR_35
0x4750 US_ALU_RGB_ADDR_36
0x4754 US_ALU_RGB_ADDR_37
0x4758 US_ALU_RGB_ADDR_38
0x475C US_ALU_RGB_ADDR_39
0x4760 US_ALU_RGB_ADDR_40
0x4764 US_ALU_RGB_ADDR_41
0x4768 US_ALU_RGB_ADDR_42
0x476C US_ALU_RGB_ADDR_43
0x4770 US_ALU_RGB_ADDR_44
0x4774 US_ALU_RGB_ADDR_45
0x4778 US_ALU_RGB_ADDR_46
0x477C US_ALU_RGB_ADDR_47
0x4780 US_ALU_RGB_ADDR_48
0x4784 US_ALU_RGB_ADDR_49
0x4788 US_ALU_RGB_ADDR_50
0x478C US_ALU_RGB_ADDR_51
0x4790 US_ALU_RGB_ADDR_52
0x4794 US_ALU_RGB_ADDR_53
0x4798 US_ALU_RGB_ADDR_54
0x479C US_ALU_RGB_ADDR_55
0x47A0 US_ALU_RGB_ADDR_56
0x47A4 US_ALU_RGB_ADDR_57
0x47A8 US_ALU_RGB_ADDR_58
0x47AC US_ALU_RGB_ADDR_59
0x47B0 US_ALU_RGB_ADDR_60
0x47B4 US_ALU_RGB_ADDR_61
0x47B8 US_ALU_RGB_ADDR_62
0x47BC US_ALU_RGB_ADDR_63
0x47C0 US_ALU_ALPHA_ADDR_0
0x47C4 US_ALU_ALPHA_ADDR_1
0x47C8 US_ALU_ALPHA_ADDR_2
0x47CC US_ALU_ALPHA_ADDR_3
0x47D0 US_ALU_ALPHA_ADDR_4
0x47D4 US_ALU_ALPHA_ADDR_5
0x47D8 US_ALU_ALPHA_ADDR_6
0x47DC US_ALU_ALPHA_ADDR_7
0x47E0 US_ALU_ALPHA_ADDR_8
0x47E4 US_ALU_ALPHA_ADDR_9
0x47E8 US_ALU_ALPHA_ADDR_10
0x47EC US_ALU_ALPHA_ADDR_11
0x47F0 US_ALU_ALPHA_ADDR_12
0x47F4 US_ALU_ALPHA_ADDR_13
0x47F8 US_ALU_ALPHA_ADDR_14
0x47FC US_ALU_ALPHA_ADDR_15
0x4800 US_ALU_ALPHA_ADDR_16
0x4804 US_ALU_ALPHA_ADDR_17
0x4808 US_ALU_ALPHA_ADDR_18
0x480C US_ALU_ALPHA_ADDR_19
0x4810 US_ALU_ALPHA_ADDR_20
0x4814 US_ALU_ALPHA_ADDR_21
0x4818 US_ALU_ALPHA_ADDR_22
0x481C US_ALU_ALPHA_ADDR_23
0x4820 US_ALU_ALPHA_ADDR_24
0x4824 US_ALU_ALPHA_ADDR_25
0x4828 US_ALU_ALPHA_ADDR_26
0x482C US_ALU_ALPHA_ADDR_27
0x4830 US_ALU_ALPHA_ADDR_28
0x4834 US_ALU_ALPHA_ADDR_29
0x4838 US_ALU_ALPHA_ADDR_30
0x483C US_ALU_ALPHA_ADDR_31
0x4840 US_ALU_ALPHA_ADDR_32
0x4844 US_ALU_ALPHA_ADDR_33
0x4848 US_ALU_ALPHA_ADDR_34
0x484C US_ALU_ALPHA_ADDR_35
0x4850 US_ALU_ALPHA_ADDR_36
0x4854 US_ALU_ALPHA_ADDR_37
0x4858 US_ALU_ALPHA_ADDR_38
0x485C US_ALU_ALPHA_ADDR_39
0x4860 US_ALU_ALPHA_ADDR_40
0x4864 US_ALU_ALPHA_ADDR_41
0x4868 US_ALU_ALPHA_ADDR_42
0x486C US_ALU_ALPHA_ADDR_43
0x4870 US_ALU_ALPHA_ADDR_44
0x4874 US_ALU_ALPHA_ADDR_45
0x4878 US_ALU_ALPHA_ADDR_46
0x487C US_ALU_ALPHA_ADDR_47
0x4880 US_ALU_ALPHA_ADDR_48
0x4884 US_ALU_ALPHA_ADDR_49
0x4888 US_ALU_ALPHA_ADDR_50
0x488C US_ALU_ALPHA_ADDR_51
0x4890 US_ALU_ALPHA_ADDR_52
0x4894 US_ALU_ALPHA_ADDR_53
0x4898 US_ALU_ALPHA_ADDR_54
0x489C US_ALU_ALPHA_ADDR_55
0x48A0 US_ALU_ALPHA_ADDR_56
0x48A4 US_ALU_ALPHA_ADDR_57
0x48A8 US_ALU_ALPHA_ADDR_58
0x48AC US_ALU_ALPHA_ADDR_59
0x48B0 US_ALU_ALPHA_ADDR_60
0x48B4 US_ALU_ALPHA_ADDR_61
0x48B8 US_ALU_ALPHA_ADDR_62
0x48BC US_ALU_ALPHA_ADDR_63
0x48C0 US_ALU_RGB_INST_0
0x48C4 US_ALU_RGB_INST_1
0x48C8 US_ALU_RGB_INST_2
0x48CC US_ALU_RGB_INST_3
0x48D0 US_ALU_RGB_INST_4
0x48D4 US_ALU_RGB_INST_5
0x48D8 US_ALU_RGB_INST_6
0x48DC US_ALU_RGB_INST_7
0x48E0 US_ALU_RGB_INST_8
0x48E4 US_ALU_RGB_INST_9
0x48E8 US_ALU_RGB_INST_10
0x48EC US_ALU_RGB_INST_11
0x48F0 US_ALU_RGB_INST_12
0x48F4 US_ALU_RGB_INST_13
0x48F8 US_ALU_RGB_INST_14
0x48FC US_ALU_RGB_INST_15
0x4900 US_ALU_RGB_INST_16
0x4904 US_ALU_RGB_INST_17
0x4908 US_ALU_RGB_INST_18
0x490C US_ALU_RGB_INST_19
0x4910 US_ALU_RGB_INST_20
0x4914 US_ALU_RGB_INST_21
0x4918 US_ALU_RGB_INST_22
0x491C US_ALU_RGB_INST_23
0x4920 US_ALU_RGB_INST_24
0x4924 US_ALU_RGB_INST_25
0x4928 US_ALU_RGB_INST_26
0x492C US_ALU_RGB_INST_27
0x4930 US_ALU_RGB_INST_28
0x4934 US_ALU_RGB_INST_29
0x4938 US_ALU_RGB_INST_30
0x493C US_ALU_RGB_INST_31
0x4940 US_ALU_RGB_INST_32
0x4944 US_ALU_RGB_INST_33
0x4948 US_ALU_RGB_INST_34
0x494C US_ALU_RGB_INST_35
0x4950 US_ALU_RGB_INST_36
0x4954 US_ALU_RGB_INST_37
0x4958 US_ALU_RGB_INST_38
0x495C US_ALU_RGB_INST_39
0x4960 US_ALU_RGB_INST_40
0x4964 US_ALU_RGB_INST_41
0x4968 US_ALU_RGB_INST_42
0x496C US_ALU_RGB_INST_43
0x4970 US_ALU_RGB_INST_44
0x4974 US_ALU_RGB_INST_45
0x4978 US_ALU_RGB_INST_46
0x497C US_ALU_RGB_INST_47
0x4980 US_ALU_RGB_INST_48
0x4984 US_ALU_RGB_INST_49
0x4988 US_ALU_RGB_INST_50
0x498C US_ALU_RGB_INST_51
0x4990 US_ALU_RGB_INST_52
0x4994 US_ALU_RGB_INST_53
0x4998 US_ALU_RGB_INST_54
0x499C US_ALU_RGB_INST_55
0x49A0 US_ALU_RGB_INST_56
0x49A4 US_ALU_RGB_INST_57
0x49A8 US_ALU_RGB_INST_58
0x49AC US_ALU_RGB_INST_59
0x49B0 US_ALU_RGB_INST_60
0x49B4 US_ALU_RGB_INST_61
0x49B8 US_ALU_RGB_INST_62
0x49BC US_ALU_RGB_INST_63
0x49C0 US_ALU_ALPHA_INST_0
0x49C4 US_ALU_ALPHA_INST_1
0x49C8 US_ALU_ALPHA_INST_2
0x49CC US_ALU_ALPHA_INST_3
0x49D0 US_ALU_ALPHA_INST_4
0x49D4 US_ALU_ALPHA_INST_5
0x49D8 US_ALU_ALPHA_INST_6
0x49DC US_ALU_ALPHA_INST_7
0x49E0 US_ALU_ALPHA_INST_8
0x49E4 US_ALU_ALPHA_INST_9
0x49E8 US_ALU_ALPHA_INST_10
0x49EC US_ALU_ALPHA_INST_11
0x49F0 US_ALU_ALPHA_INST_12
0x49F4 US_ALU_ALPHA_INST_13
0x49F8 US_ALU_ALPHA_INST_14
0x49FC US_ALU_ALPHA_INST_15
0x4A00 US_ALU_ALPHA_INST_16
0x4A04 US_ALU_ALPHA_INST_17
0x4A08 US_ALU_ALPHA_INST_18
0x4A0C US_ALU_ALPHA_INST_19
0x4A10 US_ALU_ALPHA_INST_20
0x4A14 US_ALU_ALPHA_INST_21
0x4A18 US_ALU_ALPHA_INST_22
0x4A1C US_ALU_ALPHA_INST_23
0x4A20 US_ALU_ALPHA_INST_24
0x4A24 US_ALU_ALPHA_INST_25
0x4A28 US_ALU_ALPHA_INST_26
0x4A2C US_ALU_ALPHA_INST_27
0x4A30 US_ALU_ALPHA_INST_28
0x4A34 US_ALU_ALPHA_INST_29
0x4A38 US_ALU_ALPHA_INST_30
0x4A3C US_ALU_ALPHA_INST_31
0x4A40 US_ALU_ALPHA_INST_32
0x4A44 US_ALU_ALPHA_INST_33
0x4A48 US_ALU_ALPHA_INST_34
0x4A4C US_ALU_ALPHA_INST_35
0x4A50 US_ALU_ALPHA_INST_36
0x4A54 US_ALU_ALPHA_INST_37
0x4A58 US_ALU_ALPHA_INST_38
0x4A5C US_ALU_ALPHA_INST_39
0x4A60 US_ALU_ALPHA_INST_40
0x4A64 US_ALU_ALPHA_INST_41
0x4A68 US_ALU_ALPHA_INST_42
0x4A6C US_ALU_ALPHA_INST_43
0x4A70 US_ALU_ALPHA_INST_44
0x4A74 US_ALU_ALPHA_INST_45
0x4A78 US_ALU_ALPHA_INST_46
0x4A7C US_ALU_ALPHA_INST_47
0x4A80 US_ALU_ALPHA_INST_48
0x4A84 US_ALU_ALPHA_INST_49
0x4A88 US_ALU_ALPHA_INST_50
0x4A8C US_ALU_ALPHA_INST_51
0x4A90 US_ALU_ALPHA_INST_52
0x4A94 US_ALU_ALPHA_INST_53
0x4A98 US_ALU_ALPHA_INST_54
0x4A9C US_ALU_ALPHA_INST_55
0x4AA0 US_ALU_ALPHA_INST_56
0x4AA4 US_ALU_ALPHA_INST_57
0x4AA8 US_ALU_ALPHA_INST_58
0x4AAC US_ALU_ALPHA_INST_59
0x4AB0 US_ALU_ALPHA_INST_60
0x4AB4 US_ALU_ALPHA_INST_61
0x4AB8 US_ALU_ALPHA_INST_62
0x4ABC US_ALU_ALPHA_INST_63
0x4AC0 US_ALU_EXT_ADDR_0
0x4AC4 US_ALU_EXT_ADDR_1
0x4AC8 US_ALU_EXT_ADDR_2
0x4ACC US_ALU_EXT_ADDR_3
0x4AD0 US_ALU_EXT_ADDR_4
0x4AD4 US_ALU_EXT_ADDR_5
0x4AD8 US_ALU_EXT_ADDR_6
0x4ADC US_ALU_EXT_ADDR_7
0x4AE0 US_ALU_EXT_ADDR_8
0x4AE4 US_ALU_EXT_ADDR_9
0x4AE8 US_ALU_EXT_ADDR_10
0x4AEC US_ALU_EXT_ADDR_11
0x4AF0 US_ALU_EXT_ADDR_12
0x4AF4 US_ALU_EXT_ADDR_13
0x4AF8 US_ALU_EXT_ADDR_14
0x4AFC US_ALU_EXT_ADDR_15
0x4B00 US_ALU_EXT_ADDR_16
0x4B04 US_ALU_EXT_ADDR_17
0x4B08 US_ALU_EXT_ADDR_18
0x4B0C US_ALU_EXT_ADDR_19
0x4B10 US_ALU_EXT_ADDR_20
0x4B14 US_ALU_EXT_ADDR_21
0x4B18 US_ALU_EXT_ADDR_22
0x4B1C US_ALU_EXT_ADDR_23
0x4B20 US_ALU_EXT_ADDR_24
0x4B24 US_ALU_EXT_ADDR_25
0x4B28 US_ALU_EXT_ADDR_26
0x4B2C US_ALU_EXT_ADDR_27
0x4B30 US_ALU_EXT_ADDR_28
0x4B34 US_ALU_EXT_ADDR_29
0x4B38 US_ALU_EXT_ADDR_30
0x4B3C US_ALU_EXT_ADDR_31
0x4B40 US_ALU_EXT_ADDR_32
0x4B44 US_ALU_EXT_ADDR_33
0x4B48 US_ALU_EXT_ADDR_34
0x4B4C US_ALU_EXT_ADDR_35
0x4B50 US_ALU_EXT_ADDR_36
0x4B54 US_ALU_EXT_ADDR_37
0x4B58 US_ALU_EXT_ADDR_38
0x4B5C US_ALU_EXT_ADDR_39
0x4B60 US_ALU_EXT_ADDR_40
0x4B64 US_ALU_EXT_ADDR_41
0x4B68 US_ALU_EXT_ADDR_42
0x4B6C US_ALU_EXT_ADDR_43
0x4B70 US_ALU_EXT_ADDR_44
0x4B74 US_ALU_EXT_ADDR_45
0x4B78 US_ALU_EXT_ADDR_46
0x4B7C US_ALU_EXT_ADDR_47
0x4B80 US_ALU_EXT_ADDR_48
0x4B84 US_ALU_EXT_ADDR_49
0x4B88 US_ALU_EXT_ADDR_50
0x4B8C US_ALU_EXT_ADDR_51
0x4B90 US_ALU_EXT_ADDR_52
0x4B94 US_ALU_EXT_ADDR_53
0x4B98 US_ALU_EXT_ADDR_54
0x4B9C US_ALU_EXT_ADDR_55
0x4BA0 US_ALU_EXT_ADDR_56
0x4BA4 US_ALU_EXT_ADDR_57
0x4BA8 US_ALU_EXT_ADDR_58
0x4BAC US_ALU_EXT_ADDR_59
0x4BB0 US_ALU_EXT_ADDR_60
0x4BB4 US_ALU_EXT_ADDR_61
0x4BB8 US_ALU_EXT_ADDR_62
0x4BBC US_ALU_EXT_ADDR_63
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
0x4BCC FG_FOG_COLOR_G
0x4BD0 FG_FOG_COLOR_B
0x4BD4 FG_ALPHA_FUNC
0x4BD8 FG_DEPTH_SRC
0x4C00 US_ALU_CONST_R_0
0x4C04 US_ALU_CONST_G_0
0x4C08 US_ALU_CONST_B_0
0x4C0C US_ALU_CONST_A_0
0x4C10 US_ALU_CONST_R_1
0x4C14 US_ALU_CONST_G_1
0x4C18 US_ALU_CONST_B_1
0x4C1C US_ALU_CONST_A_1
0x4C20 US_ALU_CONST_R_2
0x4C24 US_ALU_CONST_G_2
0x4C28 US_ALU_CONST_B_2
0x4C2C US_ALU_CONST_A_2
0x4C30 US_ALU_CONST_R_3
0x4C34 US_ALU_CONST_G_3
0x4C38 US_ALU_CONST_B_3
0x4C3C US_ALU_CONST_A_3
0x4C40 US_ALU_CONST_R_4
0x4C44 US_ALU_CONST_G_4
0x4C48 US_ALU_CONST_B_4
0x4C4C US_ALU_CONST_A_4
0x4C50 US_ALU_CONST_R_5
0x4C54 US_ALU_CONST_G_5
0x4C58 US_ALU_CONST_B_5
0x4C5C US_ALU_CONST_A_5
0x4C60 US_ALU_CONST_R_6
0x4C64 US_ALU_CONST_G_6
0x4C68 US_ALU_CONST_B_6
0x4C6C US_ALU_CONST_A_6
0x4C70 US_ALU_CONST_R_7
0x4C74 US_ALU_CONST_G_7
0x4C78 US_ALU_CONST_B_7
0x4C7C US_ALU_CONST_A_7
0x4C80 US_ALU_CONST_R_8
0x4C84 US_ALU_CONST_G_8
0x4C88 US_ALU_CONST_B_8
0x4C8C US_ALU_CONST_A_8
0x4C90 US_ALU_CONST_R_9
0x4C94 US_ALU_CONST_G_9
0x4C98 US_ALU_CONST_B_9
0x4C9C US_ALU_CONST_A_9
0x4CA0 US_ALU_CONST_R_10
0x4CA4 US_ALU_CONST_G_10
0x4CA8 US_ALU_CONST_B_10
0x4CAC US_ALU_CONST_A_10
0x4CB0 US_ALU_CONST_R_11
0x4CB4 US_ALU_CONST_G_11
0x4CB8 US_ALU_CONST_B_11
0x4CBC US_ALU_CONST_A_11
0x4CC0 US_ALU_CONST_R_12
0x4CC4 US_ALU_CONST_G_12
0x4CC8 US_ALU_CONST_B_12
0x4CCC US_ALU_CONST_A_12
0x4CD0 US_ALU_CONST_R_13
0x4CD4 US_ALU_CONST_G_13
0x4CD8 US_ALU_CONST_B_13
0x4CDC US_ALU_CONST_A_13
0x4CE0 US_ALU_CONST_R_14
0x4CE4 US_ALU_CONST_G_14
0x4CE8 US_ALU_CONST_B_14
0x4CEC US_ALU_CONST_A_14
0x4CF0 US_ALU_CONST_R_15
0x4CF4 US_ALU_CONST_G_15
0x4CF8 US_ALU_CONST_B_15
0x4CFC US_ALU_CONST_A_15
0x4D00 US_ALU_CONST_R_16
0x4D04 US_ALU_CONST_G_16
0x4D08 US_ALU_CONST_B_16
0x4D0C US_ALU_CONST_A_16
0x4D10 US_ALU_CONST_R_17
0x4D14 US_ALU_CONST_G_17
0x4D18 US_ALU_CONST_B_17
0x4D1C US_ALU_CONST_A_17
0x4D20 US_ALU_CONST_R_18
0x4D24 US_ALU_CONST_G_18
0x4D28 US_ALU_CONST_B_18
0x4D2C US_ALU_CONST_A_18
0x4D30 US_ALU_CONST_R_19
0x4D34 US_ALU_CONST_G_19
0x4D38 US_ALU_CONST_B_19
0x4D3C US_ALU_CONST_A_19
0x4D40 US_ALU_CONST_R_20
0x4D44 US_ALU_CONST_G_20
0x4D48 US_ALU_CONST_B_20
0x4D4C US_ALU_CONST_A_20
0x4D50 US_ALU_CONST_R_21
0x4D54 US_ALU_CONST_G_21
0x4D58 US_ALU_CONST_B_21
0x4D5C US_ALU_CONST_A_21
0x4D60 US_ALU_CONST_R_22
0x4D64 US_ALU_CONST_G_22
0x4D68 US_ALU_CONST_B_22
0x4D6C US_ALU_CONST_A_22
0x4D70 US_ALU_CONST_R_23
0x4D74 US_ALU_CONST_G_23
0x4D78 US_ALU_CONST_B_23
0x4D7C US_ALU_CONST_A_23
0x4D80 US_ALU_CONST_R_24
0x4D84 US_ALU_CONST_G_24
0x4D88 US_ALU_CONST_B_24
0x4D8C US_ALU_CONST_A_24
0x4D90 US_ALU_CONST_R_25
0x4D94 US_ALU_CONST_G_25
0x4D98 US_ALU_CONST_B_25
0x4D9C US_ALU_CONST_A_25
0x4DA0 US_ALU_CONST_R_26
0x4DA4 US_ALU_CONST_G_26
0x4DA8 US_ALU_CONST_B_26
0x4DAC US_ALU_CONST_A_26
0x4DB0 US_ALU_CONST_R_27
0x4DB4 US_ALU_CONST_G_27
0x4DB8 US_ALU_CONST_B_27
0x4DBC US_ALU_CONST_A_27
0x4DC0 US_ALU_CONST_R_28
0x4DC4 US_ALU_CONST_G_28
0x4DC8 US_ALU_CONST_B_28
0x4DCC US_ALU_CONST_A_28
0x4DD0 US_ALU_CONST_R_29
0x4DD4 US_ALU_CONST_G_29
0x4DD8 US_ALU_CONST_B_29
0x4DDC US_ALU_CONST_A_29
0x4DE0 US_ALU_CONST_R_30
0x4DE4 US_ALU_CONST_G_30
0x4DE8 US_ALU_CONST_B_30
0x4DEC US_ALU_CONST_A_30
0x4DF0 US_ALU_CONST_R_31
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
0x4E08 RB3D_ABLENDCNTL_R3
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
0x4E1C RB3D_CLRCMP_FLIPE_R3
0x4E20 RB3D_CLRCMP_CLR_R3
0x4E24 RB3D_CLRCMP_MSK_R3
0x4E48 RB3D_DEBUG_CTL
0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
0x4E50 RB3D_DITHER_CTL
0x4E54 RB3D_CMASK_OFFSET0
0x4E58 RB3D_CMASK_OFFSET1
0x4E5C RB3D_CMASK_OFFSET2
0x4E60 RB3D_CMASK_OFFSET3
0x4E64 RB3D_CMASK_PITCH0
0x4E68 RB3D_CMASK_PITCH1
0x4E6C RB3D_CMASK_PITCH2
0x4E70 RB3D_CMASK_PITCH3
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
0x4F28 ZB_DEPTHCLEARVALUE
0x4F58 ZB_ZPASS_DATA
/drivers/video/drm/radeon/reg_srcs/rv515
0,0 → 1,494
rv515 0x6d40
0x1434 SRC_Y_X
0x1438 DST_Y_X
0x143C DST_HEIGHT_WIDTH
0x146C DP_GUI_MASTER_CNTL
0x1474 BRUSH_Y_X
0x1478 DP_BRUSH_BKGD_CLR
0x147C DP_BRUSH_FRGD_CLR
0x1480 BRUSH_DATA0
0x1484 BRUSH_DATA1
0x1598 DST_WIDTH_HEIGHT
0x15C0 CLR_CMP_CNTL
0x15C4 CLR_CMP_CLR_SRC
0x15C8 CLR_CMP_CLR_DST
0x15CC CLR_CMP_MSK
0x15D8 DP_SRC_FRGD_CLR
0x15DC DP_SRC_BKGD_CLR
0x1600 DST_LINE_START
0x1604 DST_LINE_END
0x1608 DST_LINE_PATCOUNT
0x16C0 DP_CNTL
0x16CC DP_WRITE_MSK
0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
0x16E8 DEFAULT_SC_BOTTOM_RIGHT
0x16EC SC_TOP_LEFT
0x16F0 SC_BOTTOM_RIGHT
0x16F4 SRC_SC_BOTTOM_RIGHT
0x1714 DSTCACHE_CTLSTAT
0x1720 WAIT_UNTIL
0x172C RBBM_GUICNTL
0x1D98 VAP_VPORT_XSCALE
0x1D9C VAP_VPORT_XOFFSET
0x1DA0 VAP_VPORT_YSCALE
0x1DA4 VAP_VPORT_YOFFSET
0x1DA8 VAP_VPORT_ZSCALE
0x1DAC VAP_VPORT_ZOFFSET
0x2080 VAP_CNTL
0x208C VAP_INDEX_OFFSET
0x2090 VAP_OUT_VTX_FMT_0
0x2094 VAP_OUT_VTX_FMT_1
0x20B0 VAP_VTE_CNTL
0x2138 VAP_VF_MIN_VTX_INDX
0x2140 VAP_CNTL_STATUS
0x2150 VAP_PROG_STREAM_CNTL_0
0x2154 VAP_PROG_STREAM_CNTL_1
0x2158 VAP_PROG_STREAM_CNTL_2
0x215C VAP_PROG_STREAM_CNTL_3
0x2160 VAP_PROG_STREAM_CNTL_4
0x2164 VAP_PROG_STREAM_CNTL_5
0x2168 VAP_PROG_STREAM_CNTL_6
0x216C VAP_PROG_STREAM_CNTL_7
0x2180 VAP_VTX_STATE_CNTL
0x2184 VAP_VSM_VTX_ASSM
0x2188 VAP_VTX_STATE_IND_REG_0
0x218C VAP_VTX_STATE_IND_REG_1
0x2190 VAP_VTX_STATE_IND_REG_2
0x2194 VAP_VTX_STATE_IND_REG_3
0x2198 VAP_VTX_STATE_IND_REG_4
0x219C VAP_VTX_STATE_IND_REG_5
0x21A0 VAP_VTX_STATE_IND_REG_6
0x21A4 VAP_VTX_STATE_IND_REG_7
0x21A8 VAP_VTX_STATE_IND_REG_8
0x21AC VAP_VTX_STATE_IND_REG_9
0x21B0 VAP_VTX_STATE_IND_REG_10
0x21B4 VAP_VTX_STATE_IND_REG_11
0x21B8 VAP_VTX_STATE_IND_REG_12
0x21BC VAP_VTX_STATE_IND_REG_13
0x21C0 VAP_VTX_STATE_IND_REG_14
0x21C4 VAP_VTX_STATE_IND_REG_15
0x21DC VAP_PSC_SGN_NORM_CNTL
0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
0x21EC VAP_PROG_STREAM_CNTL_EXT_3
0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
0x21FC VAP_PROG_STREAM_CNTL_EXT_7
0x2200 VAP_PVS_VECTOR_INDX_REG
0x2204 VAP_PVS_VECTOR_DATA_REG
0x2208 VAP_PVS_VECTOR_DATA_REG_128
0x2218 VAP_TEX_TO_COLOR_CNTL
0x221C VAP_CLIP_CNTL
0x2220 VAP_GB_VERT_CLIP_ADJ
0x2224 VAP_GB_VERT_DISC_ADJ
0x2228 VAP_GB_HORZ_CLIP_ADJ
0x222C VAP_GB_HORZ_DISC_ADJ
0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
0x2284 VAP_PVS_STATE_FLUSH_REG
0x2288 VAP_PVS_VTX_TIMEOUT_REG
0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
0x22D0 VAP_PVS_CODE_CNTL_0
0x22D4 VAP_PVS_CONST_CNTL
0x22D8 VAP_PVS_CODE_CNTL_1
0x22DC VAP_PVS_FLOW_CNTL_OPC
0x2500 VAP_PVS_FLOW_CNTL_ADDRS_LW_0
0x2504 VAP_PVS_FLOW_CNTL_ADDRS_UW_0
0x2508 VAP_PVS_FLOW_CNTL_ADDRS_LW_1
0x250C VAP_PVS_FLOW_CNTL_ADDRS_UW_1
0x2510 VAP_PVS_FLOW_CNTL_ADDRS_LW_2
0x2514 VAP_PVS_FLOW_CNTL_ADDRS_UW_2
0x2518 VAP_PVS_FLOW_CNTL_ADDRS_LW_3
0x251C VAP_PVS_FLOW_CNTL_ADDRS_UW_3
0x2520 VAP_PVS_FLOW_CNTL_ADDRS_LW_4
0x2524 VAP_PVS_FLOW_CNTL_ADDRS_UW_4
0x2528 VAP_PVS_FLOW_CNTL_ADDRS_LW_5
0x252C VAP_PVS_FLOW_CNTL_ADDRS_UW_5
0x2530 VAP_PVS_FLOW_CNTL_ADDRS_LW_6
0x2534 VAP_PVS_FLOW_CNTL_ADDRS_UW_6
0x2538 VAP_PVS_FLOW_CNTL_ADDRS_LW_7
0x253C VAP_PVS_FLOW_CNTL_ADDRS_UW_7
0x2540 VAP_PVS_FLOW_CNTL_ADDRS_LW_8
0x2544 VAP_PVS_FLOW_CNTL_ADDRS_UW_8
0x2548 VAP_PVS_FLOW_CNTL_ADDRS_LW_9
0x254C VAP_PVS_FLOW_CNTL_ADDRS_UW_9
0x2550 VAP_PVS_FLOW_CNTL_ADDRS_LW_10
0x2554 VAP_PVS_FLOW_CNTL_ADDRS_UW_10
0x2558 VAP_PVS_FLOW_CNTL_ADDRS_LW_11
0x255C VAP_PVS_FLOW_CNTL_ADDRS_UW_11
0x2560 VAP_PVS_FLOW_CNTL_ADDRS_LW_12
0x2564 VAP_PVS_FLOW_CNTL_ADDRS_UW_12
0x2568 VAP_PVS_FLOW_CNTL_ADDRS_LW_13
0x256C VAP_PVS_FLOW_CNTL_ADDRS_UW_13
0x2570 VAP_PVS_FLOW_CNTL_ADDRS_LW_14
0x2574 VAP_PVS_FLOW_CNTL_ADDRS_UW_14
0x2578 VAP_PVS_FLOW_CNTL_ADDRS_LW_15
0x257C VAP_PVS_FLOW_CNTL_ADDRS_UW_15
0x342C RB2D_DSTCACHE_CTLSTAT
0x4000 GB_VAP_RASTER_VTX_FMT_0
0x4004 GB_VAP_RASTER_VTX_FMT_1
0x4008 GB_ENABLE
0x4010 GB_MSPOS0
0x4014 GB_MSPOS1
0x401C GB_SELECT
0x4020 GB_AA_CONFIG
0x4024 GB_FIFO_SIZE
0x4100 TX_INVALTAGS
0x4114 SU_TEX_WRAP_PS3
0x4118 PS3_ENABLE
0x411c PS3_VTX_FMT
0x4120 PS3_TEX_SOURCE
0x4200 GA_POINT_S0
0x4204 GA_POINT_T0
0x4208 GA_POINT_S1
0x420C GA_POINT_T1
0x4214 GA_TRIANGLE_STIPPLE
0x421C GA_POINT_SIZE
0x4230 GA_POINT_MINMAX
0x4234 GA_LINE_CNTL
0x4238 GA_LINE_STIPPLE_CONFIG
0x4258 GA_COLOR_CONTROL_PS3
0x4260 GA_LINE_STIPPLE_VALUE
0x4264 GA_LINE_S0
0x4268 GA_LINE_S1
0x4278 GA_COLOR_CONTROL
0x427C GA_SOLID_RG
0x4280 GA_SOLID_BA
0x4288 GA_POLY_MODE
0x428C GA_ROUND_MODE
0x4290 GA_OFFSET
0x4294 GA_FOG_SCALE
0x4298 GA_FOG_OFFSET
0x42A0 SU_TEX_WRAP
0x42A4 SU_POLY_OFFSET_FRONT_SCALE
0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
0x42AC SU_POLY_OFFSET_BACK_SCALE
0x42B0 SU_POLY_OFFSET_BACK_OFFSET
0x42B4 SU_POLY_OFFSET_ENABLE
0x42B8 SU_CULL_MODE
0x42C0 SU_DEPTH_SCALE
0x42C4 SU_DEPTH_OFFSET
0x42C8 SU_REG_DEST
0x4300 RS_COUNT
0x4304 RS_INST_COUNT
0x4074 RS_IP_0
0x4078 RS_IP_1
0x407C RS_IP_2
0x4080 RS_IP_3
0x4084 RS_IP_4
0x4088 RS_IP_5
0x408C RS_IP_6
0x4090 RS_IP_7
0x4094 RS_IP_8
0x4098 RS_IP_9
0x409C RS_IP_10
0x40A0 RS_IP_11
0x40A4 RS_IP_12
0x40A8 RS_IP_13
0x40AC RS_IP_14
0x40B0 RS_IP_15
0x4320 RS_INST_0
0x4324 RS_INST_1
0x4328 RS_INST_2
0x432C RS_INST_3
0x4330 RS_INST_4
0x4334 RS_INST_5
0x4338 RS_INST_6
0x433C RS_INST_7
0x4340 RS_INST_8
0x4344 RS_INST_9
0x4348 RS_INST_10
0x434C RS_INST_11
0x4350 RS_INST_12
0x4354 RS_INST_13
0x4358 RS_INST_14
0x435C RS_INST_15
0x43A8 SC_EDGERULE
0x43B0 SC_CLIP_0_A
0x43B4 SC_CLIP_0_B
0x43B8 SC_CLIP_1_A
0x43BC SC_CLIP_1_B
0x43C0 SC_CLIP_2_A
0x43C4 SC_CLIP_2_B
0x43C8 SC_CLIP_3_A
0x43CC SC_CLIP_3_B
0x43D0 SC_CLIP_RULE
0x43E0 SC_SCISSOR0
0x43E8 SC_SCREENDOOR
0x4440 TX_FILTER1_0
0x4444 TX_FILTER1_1
0x4448 TX_FILTER1_2
0x444C TX_FILTER1_3
0x4450 TX_FILTER1_4
0x4454 TX_FILTER1_5
0x4458 TX_FILTER1_6
0x445C TX_FILTER1_7
0x4460 TX_FILTER1_8
0x4464 TX_FILTER1_9
0x4468 TX_FILTER1_10
0x446C TX_FILTER1_11
0x4470 TX_FILTER1_12
0x4474 TX_FILTER1_13
0x4478 TX_FILTER1_14
0x447C TX_FILTER1_15
0x4580 TX_CHROMA_KEY_0
0x4584 TX_CHROMA_KEY_1
0x4588 TX_CHROMA_KEY_2
0x458C TX_CHROMA_KEY_3
0x4590 TX_CHROMA_KEY_4
0x4594 TX_CHROMA_KEY_5
0x4598 TX_CHROMA_KEY_6
0x459C TX_CHROMA_KEY_7
0x45A0 TX_CHROMA_KEY_8
0x45A4 TX_CHROMA_KEY_9
0x45A8 TX_CHROMA_KEY_10
0x45AC TX_CHROMA_KEY_11
0x45B0 TX_CHROMA_KEY_12
0x45B4 TX_CHROMA_KEY_13
0x45B8 TX_CHROMA_KEY_14
0x45BC TX_CHROMA_KEY_15
0x45C0 TX_BORDER_COLOR_0
0x45C4 TX_BORDER_COLOR_1
0x45C8 TX_BORDER_COLOR_2
0x45CC TX_BORDER_COLOR_3
0x45D0 TX_BORDER_COLOR_4
0x45D4 TX_BORDER_COLOR_5
0x45D8 TX_BORDER_COLOR_6
0x45DC TX_BORDER_COLOR_7
0x45E0 TX_BORDER_COLOR_8
0x45E4 TX_BORDER_COLOR_9
0x45E8 TX_BORDER_COLOR_10
0x45EC TX_BORDER_COLOR_11
0x45F0 TX_BORDER_COLOR_12
0x45F4 TX_BORDER_COLOR_13
0x45F8 TX_BORDER_COLOR_14
0x45FC TX_BORDER_COLOR_15
0x4250 GA_US_VECTOR_INDEX
0x4254 GA_US_VECTOR_DATA
0x4600 US_CONFIG
0x4604 US_PIXSIZE
0x4620 US_FC_BOOL_CONST
0x4624 US_FC_CTRL
0x4630 US_CODE_ADDR
0x4634 US_CODE_RANGE
0x4638 US_CODE_OFFSET
0x4640 US_FORMAT0_0
0x4644 US_FORMAT0_1
0x4648 US_FORMAT0_2
0x464C US_FORMAT0_3
0x4650 US_FORMAT0_4
0x4654 US_FORMAT0_5
0x4658 US_FORMAT0_6
0x465C US_FORMAT0_7
0x4660 US_FORMAT0_8
0x4664 US_FORMAT0_9
0x4668 US_FORMAT0_10
0x466C US_FORMAT0_11
0x4670 US_FORMAT0_12
0x4674 US_FORMAT0_13
0x4678 US_FORMAT0_14
0x467C US_FORMAT0_15
0x46A4 US_OUT_FMT_0
0x46A8 US_OUT_FMT_1
0x46AC US_OUT_FMT_2
0x46B0 US_OUT_FMT_3
0x46B4 US_W_FMT
0x4BC0 FG_FOG_BLEND
0x4BC4 FG_FOG_FACTOR
0x4BC8 FG_FOG_COLOR_R
0x4BCC FG_FOG_COLOR_G
0x4BD0 FG_FOG_COLOR_B
0x4BD4 FG_ALPHA_FUNC
0x4BD8 FG_DEPTH_SRC
0x4BE0 FG_ALPHA_VALUE
0x4C00 US_ALU_CONST_R_0
0x4C04 US_ALU_CONST_G_0
0x4C08 US_ALU_CONST_B_0
0x4C0C US_ALU_CONST_A_0
0x4C10 US_ALU_CONST_R_1
0x4C14 US_ALU_CONST_G_1
0x4C18 US_ALU_CONST_B_1
0x4C1C US_ALU_CONST_A_1
0x4C20 US_ALU_CONST_R_2
0x4C24 US_ALU_CONST_G_2
0x4C28 US_ALU_CONST_B_2
0x4C2C US_ALU_CONST_A_2
0x4C30 US_ALU_CONST_R_3
0x4C34 US_ALU_CONST_G_3
0x4C38 US_ALU_CONST_B_3
0x4C3C US_ALU_CONST_A_3
0x4C40 US_ALU_CONST_R_4
0x4C44 US_ALU_CONST_G_4
0x4C48 US_ALU_CONST_B_4
0x4C4C US_ALU_CONST_A_4
0x4C50 US_ALU_CONST_R_5
0x4C54 US_ALU_CONST_G_5
0x4C58 US_ALU_CONST_B_5
0x4C5C US_ALU_CONST_A_5
0x4C60 US_ALU_CONST_R_6
0x4C64 US_ALU_CONST_G_6
0x4C68 US_ALU_CONST_B_6
0x4C6C US_ALU_CONST_A_6
0x4C70 US_ALU_CONST_R_7
0x4C74 US_ALU_CONST_G_7
0x4C78 US_ALU_CONST_B_7
0x4C7C US_ALU_CONST_A_7
0x4C80 US_ALU_CONST_R_8
0x4C84 US_ALU_CONST_G_8
0x4C88 US_ALU_CONST_B_8
0x4C8C US_ALU_CONST_A_8
0x4C90 US_ALU_CONST_R_9
0x4C94 US_ALU_CONST_G_9
0x4C98 US_ALU_CONST_B_9
0x4C9C US_ALU_CONST_A_9
0x4CA0 US_ALU_CONST_R_10
0x4CA4 US_ALU_CONST_G_10
0x4CA8 US_ALU_CONST_B_10
0x4CAC US_ALU_CONST_A_10
0x4CB0 US_ALU_CONST_R_11
0x4CB4 US_ALU_CONST_G_11
0x4CB8 US_ALU_CONST_B_11
0x4CBC US_ALU_CONST_A_11
0x4CC0 US_ALU_CONST_R_12
0x4CC4 US_ALU_CONST_G_12
0x4CC8 US_ALU_CONST_B_12
0x4CCC US_ALU_CONST_A_12
0x4CD0 US_ALU_CONST_R_13
0x4CD4 US_ALU_CONST_G_13
0x4CD8 US_ALU_CONST_B_13
0x4CDC US_ALU_CONST_A_13
0x4CE0 US_ALU_CONST_R_14
0x4CE4 US_ALU_CONST_G_14
0x4CE8 US_ALU_CONST_B_14
0x4CEC US_ALU_CONST_A_14
0x4CF0 US_ALU_CONST_R_15
0x4CF4 US_ALU_CONST_G_15
0x4CF8 US_ALU_CONST_B_15
0x4CFC US_ALU_CONST_A_15
0x4D00 US_ALU_CONST_R_16
0x4D04 US_ALU_CONST_G_16
0x4D08 US_ALU_CONST_B_16
0x4D0C US_ALU_CONST_A_16
0x4D10 US_ALU_CONST_R_17
0x4D14 US_ALU_CONST_G_17
0x4D18 US_ALU_CONST_B_17
0x4D1C US_ALU_CONST_A_17
0x4D20 US_ALU_CONST_R_18
0x4D24 US_ALU_CONST_G_18
0x4D28 US_ALU_CONST_B_18
0x4D2C US_ALU_CONST_A_18
0x4D30 US_ALU_CONST_R_19
0x4D34 US_ALU_CONST_G_19
0x4D38 US_ALU_CONST_B_19
0x4D3C US_ALU_CONST_A_19
0x4D40 US_ALU_CONST_R_20
0x4D44 US_ALU_CONST_G_20
0x4D48 US_ALU_CONST_B_20
0x4D4C US_ALU_CONST_A_20
0x4D50 US_ALU_CONST_R_21
0x4D54 US_ALU_CONST_G_21
0x4D58 US_ALU_CONST_B_21
0x4D5C US_ALU_CONST_A_21
0x4D60 US_ALU_CONST_R_22
0x4D64 US_ALU_CONST_G_22
0x4D68 US_ALU_CONST_B_22
0x4D6C US_ALU_CONST_A_22
0x4D70 US_ALU_CONST_R_23
0x4D74 US_ALU_CONST_G_23
0x4D78 US_ALU_CONST_B_23
0x4D7C US_ALU_CONST_A_23
0x4D80 US_ALU_CONST_R_24
0x4D84 US_ALU_CONST_G_24
0x4D88 US_ALU_CONST_B_24
0x4D8C US_ALU_CONST_A_24
0x4D90 US_ALU_CONST_R_25
0x4D94 US_ALU_CONST_G_25
0x4D98 US_ALU_CONST_B_25
0x4D9C US_ALU_CONST_A_25
0x4DA0 US_ALU_CONST_R_26
0x4DA4 US_ALU_CONST_G_26
0x4DA8 US_ALU_CONST_B_26
0x4DAC US_ALU_CONST_A_26
0x4DB0 US_ALU_CONST_R_27
0x4DB4 US_ALU_CONST_G_27
0x4DB8 US_ALU_CONST_B_27
0x4DBC US_ALU_CONST_A_27
0x4DC0 US_ALU_CONST_R_28
0x4DC4 US_ALU_CONST_G_28
0x4DC8 US_ALU_CONST_B_28
0x4DCC US_ALU_CONST_A_28
0x4DD0 US_ALU_CONST_R_29
0x4DD4 US_ALU_CONST_G_29
0x4DD8 US_ALU_CONST_B_29
0x4DDC US_ALU_CONST_A_29
0x4DE0 US_ALU_CONST_R_30
0x4DE4 US_ALU_CONST_G_30
0x4DE8 US_ALU_CONST_B_30
0x4DEC US_ALU_CONST_A_30
0x4DF0 US_ALU_CONST_R_31
0x4DF4 US_ALU_CONST_G_31
0x4DF8 US_ALU_CONST_B_31
0x4DFC US_ALU_CONST_A_31
0x4E08 RB3D_ABLENDCNTL_R3
0x4E10 RB3D_CONSTANT_COLOR
0x4E14 RB3D_COLOR_CLEAR_VALUE
0x4E18 RB3D_ROPCNTL_R3
0x4E1C RB3D_CLRCMP_FLIPE_R3
0x4E20 RB3D_CLRCMP_CLR_R3
0x4E24 RB3D_CLRCMP_MSK_R3
0x4E48 RB3D_DEBUG_CTL
0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
0x4E50 RB3D_DITHER_CTL
0x4E54 RB3D_CMASK_OFFSET0
0x4E58 RB3D_CMASK_OFFSET1
0x4E5C RB3D_CMASK_OFFSET2
0x4E60 RB3D_CMASK_OFFSET3
0x4E64 RB3D_CMASK_PITCH0
0x4E68 RB3D_CMASK_PITCH1
0x4E6C RB3D_CMASK_PITCH2
0x4E70 RB3D_CMASK_PITCH3
0x4E74 RB3D_CMASK_WRINDEX
0x4E78 RB3D_CMASK_DWORD
0x4E7C RB3D_CMASK_RDINDEX
0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
0x4EF8 RB3D_CONSTANT_COLOR_AR
0x4EFC RB3D_CONSTANT_COLOR_GB
0x4F04 ZB_ZSTENCILCNTL
0x4F08 ZB_STENCILREFMASK
0x4F14 ZB_ZTOP
0x4F18 ZB_ZCACHE_CTLSTAT
0x4F58 ZB_ZPASS_DATA
0x4F28 ZB_DEPTHCLEARVALUE
0x4FD4 ZB_STENCILREFMASK_BF
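The two register lists above share one format: the header line names the chip and the highest register offset the checker covers (here rv515 0x6d40), and every following line is one allowed register offset plus its mnemonic. In the Linux tree these reg_srcs lists are fed through a small build-time generator (mkregtable) that emits a per-register bitmap, which the command-stream checker consults before letting a user command buffer write a register. A minimal lookup sketch, with illustrative names and an assumed set-bit-means-allowed polarity:

static bool reg_is_safe(const u32 *safe_bm, unsigned last_reg, u32 reg)
{
	unsigned word = reg >> 7;	 /* 128 bytes = 32 registers per word */
	unsigned bit  = (reg >> 2) & 31; /* one bit per 4-byte register */

	if (reg > last_reg || (reg & 3))
		return false;		 /* past the list, or unaligned */
	return (safe_bm[word] & (1u << bit)) != 0;
}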
/drivers/video/drm/radeon/rs400.c
77,7 → 77,7
{
int r;
 
if (rdev->gart.table.ram.ptr) {
if (rdev->gart.ptr) {
WARN(1, "RS400 GART already initialized\n");
return 0;
}
182,6 → 182,9
/* Enable gart */
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg));
rs400_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
209,6 → 212,7
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
uint32_t entry;
u32 *gtt = rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
218,7 → 222,7
((upper_32_bits(addr) & 0xff) << 4) |
RS400_PTE_WRITEABLE | RS400_PTE_READABLE;
entry = cpu_to_le32(entry);
rdev->gart.table.ram.ptr[i] = entry;
gtt[i] = entry;
return 0;
}
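The rs400_gart_set_page() change above only swaps in the generic rdev->gart.ptr table pointer; the PTE encoding itself is unchanged: each entry is a little-endian 32-bit word carrying the 4 KiB-aligned low address, address bits 39:32 packed into bits 11:4, and read/write permission flags. A minimal packing sketch; upper_32_bits(), the flag names, and the cpu_to_le32() step are taken from the diff, so treat this as illustrative rather than the exact driver code:

static u32 rs400_make_pte(u64 addr)
{
	u32 entry;

	entry  = (u32)addr & ~0xfffu;			/* 4 KiB page base */
	entry |= (upper_32_bits(addr) & 0xff) << 4;	/* bits 39:32 -> PTE[11:4] */
	entry |= RS400_PTE_WRITEABLE | RS400_PTE_READABLE; /* permission bits */
	return entry;		/* caller stores cpu_to_le32(entry) in the table */
}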
 
238,7 → 242,7
return -1;
}
 
void rs400_gpu_init(struct radeon_device *rdev)
static void rs400_gpu_init(struct radeon_device *rdev)
{
/* FIXME: is this correct ? */
r420_pipes_init(rdev);
248,7 → 252,7
}
}
 
void rs400_mc_init(struct radeon_device *rdev)
static void rs400_mc_init(struct radeon_device *rdev)
{
u64 base;
 
366,7 → 370,7
#endif
}
 
void rs400_mc_program(struct radeon_device *rdev)
static void rs400_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
 
415,11 → 419,13
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
return 0;
}
 
482,6 → 488,7
if (r)
return r;
r300_set_reg_safe(rdev);
 
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
/drivers/video/drm/radeon/rs600.c
35,7 → 35,7
* close to that of the R600 family (R600 likely being an evolution
* of the RS600 GART block).
*/
#include "drmP.h"
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
43,9 → 43,35
 
#include "rs600_reg_safe.h"
 
void rs600_gpu_init(struct radeon_device *rdev);
static void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
static const u32 crtc_offsets[2] =
{
0,
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL
};
 
void avivo_wait_for_vblank(struct radeon_device *rdev, int crtc)
{
int i;
 
if (crtc >= rdev->num_crtc)
return;
 
if (RREG32(AVIVO_D1CRTC_CONTROL + crtc_offsets[crtc]) & AVIVO_CRTC_EN) {
for (i = 0; i < rdev->usec_timeout; i++) {
if (!(RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK))
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(AVIVO_D1CRTC_STATUS + crtc_offsets[crtc]) & AVIVO_D1CRTC_V_BLANK)
break;
udelay(1);
}
}
}
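avivo_wait_for_vblank() gives its caller edge semantics: when the CRTC is enabled it first polls until any vblank already in progress ends, then polls until the next one begins, so the caller receives the start of a complete vblank interval rather than the tail of one. A hedged usage sketch (the loop variable is assumed):

int i;

/* hand every active display a fresh vblank window before
 * reprogramming anything scanout depends on */
for (i = 0; i < rdev->num_crtc; i++)
	avivo_wait_for_vblank(rdev, i);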
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
101,6 → 127,7
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned enable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
108,19 → 135,18
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
rdev->irq.hpd[0] = true;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
rdev->irq.hpd[1] = true;
break;
default:
break;
}
enable |= 1 << radeon_connector->hpd.hpd;
radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
}
if (rdev->irq.installed)
rs600_irq_set(rdev);
// radeon_irq_kms_enable_hpd(rdev, enable);
}
 
void rs600_hpd_fini(struct radeon_device *rdev)
127,6 → 153,7
{
struct drm_device *dev = rdev->ddev;
struct drm_connector *connector;
unsigned disable = 0;
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
134,29 → 161,19
case RADEON_HPD_1:
WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
rdev->irq.hpd[0] = false;
break;
case RADEON_HPD_2:
WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
rdev->irq.hpd[1] = false;
break;
default:
break;
}
disable |= 1 << radeon_connector->hpd.hpd;
}
// radeon_irq_kms_disable_hpd(rdev, disable);
}
 
void rs600_bm_disable(struct radeon_device *rdev)
{
u32 tmp;
 
/* disable bus mastering */
tmp = PciRead16(rdev->pdev->bus, rdev->pdev->devfn, 0x4);
PciWrite16(rdev->pdev->bus, rdev->pdev->devfn, 0x4, tmp & 0xFFFB);
mdelay(1);
}
 
int rs600_asic_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
180,7 → 197,8
WREG32(RADEON_CP_RB_CNTL, tmp);
// pci_save_state(rdev->pdev);
/* disable bus mastering */
rs600_bm_disable(rdev);
// pci_clear_master(rdev->pdev);
mdelay(1);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
211,7 → 229,6
/* Check if GPU is idle */
if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
dev_err(rdev->dev, "failed to reset GPU\n");
rdev->gpu_lockup = true;
ret = -1;
} else
dev_info(rdev->dev, "GPU reset succeeded\n");

240,11 → 257,11
tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
}
 
int rs600_gart_init(struct radeon_device *rdev)
static int rs600_gart_init(struct radeon_device *rdev)
{
int r;
 
if (rdev->gart.table.vram.robj) {
if (rdev->gart.robj) {
WARN(1, "RS600 GART already initialized\n");
return 0;
}
262,7 → 279,7
u32 tmp;
int r, i;
 
if (rdev->gart.table.vram.robj == NULL) {
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
315,30 → 332,25
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, (tmp | S_000009_ENABLE_PAGE_TABLES(1)));
rs600_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
void rs600_gart_disable(struct radeon_device *rdev)
static void rs600_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int r;
 
/* FIXME: disable out of gart access */
WREG32_MC(R_000100_MC_PT0_CNTL, 0);
tmp = RREG32_MC(R_000009_MC_CNTL1);
WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (r == 0) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
radeon_gart_table_vram_unpin(rdev);
}
}
}
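This is one of several spots in the revision where the open-coded reserve/kunmap/unpin/unreserve sequence is folded into radeon_gart_table_vram_unpin(). Judging purely from the code it replaces here, the helper presumably amounts to:

r = radeon_bo_reserve(rdev->gart.robj, false);
if (likely(r == 0)) {
	radeon_bo_kunmap(rdev->gart.robj);	/* drop the CPU mapping */
	radeon_bo_unpin(rdev->gart.robj);	/* release the VRAM pin */
	radeon_bo_unreserve(rdev->gart.robj);
}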
 
void rs600_gart_fini(struct radeon_device *rdev)
static void rs600_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rs600_gart_disable(rdev);
353,7 → 365,7
 
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
void __iomem *ptr = (void *)rdev->gart.ptr;
 
if (i < 0 || i > rdev->gart.num_gpu_pages) {
return -EINVAL;
373,6 → 385,12
~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
u32 hdmi0;
if (ASIC_IS_DCE2(rdev))
hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
else
hdmi0 = 0;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
379,18 → 397,15
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
if (rdev->irq.sw_int) {
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
tmp |= S_000040_GUI_IDLE(1);
}
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
atomic_read(&rdev->irq.pflip[0])) {
mode_int |= S_006540_D1MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.crtc_vblank_int[1] ||
rdev->irq.pflip[1]) {
atomic_read(&rdev->irq.pflip[1])) {
mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
}
if (rdev->irq.hpd[0]) {
399,10 → 414,15
if (rdev->irq.hpd[1]) {
hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
}
if (rdev->irq.afmt[0]) {
hdmi0 |= S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
}
WREG32(R_000040_GEN_INT_CNTL, tmp);
WREG32(R_006540_DxMODE_INT_MASK, mode_int);
WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
if (ASIC_IS_DCE2(rdev))
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
return 0;
}
 
412,12 → 432,6
uint32_t irq_mask = S_000044_SW_INT(1);
u32 tmp;
 
/* the interrupt works, but the status bit is permanently asserted */
if (rdev->irq.gui_idle && radeon_gui_idle(rdev)) {
if (!rdev->irq.gui_idle_acked)
irq_mask |= S_000044_GUI_IDLE_STAT(1);
}
 
if (G_000044_DISPLAY_INT_STAT(irqs)) {
rdev->irq.stat_regs.r500.disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
442,6 → 456,17
rdev->irq.stat_regs.r500.disp_int = 0;
}
 
if (ASIC_IS_DCE2(rdev)) {
rdev->irq.stat_regs.r500.hdmi0_status = RREG32(R_007404_HDMI0_STATUS) &
S_007404_HDMI0_AZ_FORMAT_WTRIG(1);
if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);
}
} else
rdev->irq.stat_regs.r500.hdmi0_status = 0;
 
if (irqs) {
WREG32(R_000044_GEN_INT_STATUS, irqs);
}
450,6 → 475,9
 
void rs600_irq_disable(struct radeon_device *rdev)
{
u32 hdmi0 = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL) &
~S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(1);
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
WREG32(R_000040_GEN_INT_CNTL, 0);
WREG32(R_006540_DxMODE_INT_MASK, 0);
/* Wait and acknowledge irq */
461,25 → 489,21
{
u32 status, msi_rearm;
bool queue_hotplug = false;
bool queue_hdmi = false;
 
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
 
status = rs600_irq_ack(rdev);
if (!status && !rdev->irq.stat_regs.r500.disp_int) {
if (!status &&
!rdev->irq.stat_regs.r500.disp_int &&
!rdev->irq.stat_regs.r500.hdmi0_status) {
return IRQ_NONE;
}
while (status || rdev->irq.stat_regs.r500.disp_int) {
while (status ||
rdev->irq.stat_regs.r500.disp_int ||
rdev->irq.stat_regs.r500.hdmi0_status) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
radeon_fence_process(rdev);
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
rdev->irq.gui_idle_acked = true;
rdev->pm.gui_idle = true;
// wake_up(&rdev->irq.idle_queue);
}
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(rdev->irq.stat_regs.r500.disp_int)) {
if (rdev->irq.crtc_vblank_int[0]) {
507,12 → 531,16
queue_hotplug = true;
DRM_DEBUG("HPD2\n");
}
if (G_007404_HDMI0_AZ_FORMAT_WTRIG(rdev->irq.stat_regs.r500.hdmi0_status)) {
queue_hdmi = true;
DRM_DEBUG("HDMI0\n");
}
status = rs600_irq_ack(rdev);
}
/* reset gui idle ack. the status bit is broken */
rdev->irq.gui_idle_acked = false;
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
// if (queue_hdmi)
// schedule_work(&rdev->audio_work);
if (rdev->msi_enabled) {
switch (rdev->family) {
case CHIP_RS600:
523,9 → 551,7
WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
break;
default:
msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
WREG32(RADEON_MSI_REARM_EN, msi_rearm);
WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
break;
}
}
552,7 → 578,7
return -1;
}
 
void rs600_gpu_init(struct radeon_device *rdev)
static void rs600_gpu_init(struct radeon_device *rdev)
{
r420_pipes_init(rdev);
/* Wait for mc idle */
560,7 → 586,7
dev_warn(rdev->dev, "Timed out waiting for MC idle before updating MC.\n");
}
 
void rs600_mc_init(struct radeon_device *rdev)
static void rs600_mc_init(struct radeon_device *rdev)
{
u64 base;
 
622,7 → 648,7
WREG32(R_000074_MC_IND_DATA, v);
}
 
void rs600_debugfs(struct radeon_device *rdev)
static void rs600_debugfs(struct radeon_device *rdev)
{
if (r100_debugfs_rbbm_init(rdev))
DRM_ERROR("Failed to register debugfs file for RBBM !\n");
688,11 → 714,14
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
 
return 0;
}
 
754,6 → 783,7
if (r)
return r;
rs600_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
/drivers/video/drm/radeon/rs600d.h
485,6 → 485,20
#define S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) & 0x1) << 16)
#define G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x) (((x) >> 16) & 0x1)
#define C_007D18_DC_HOT_PLUG_DETECT2_INT_EN 0xFFFEFFFF
#define R_007404_HDMI0_STATUS 0x007404
#define S_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) & 0x1) << 28)
#define G_007404_HDMI0_AZ_FORMAT_WTRIG(x) (((x) >> 28) & 0x1)
#define C_007404_HDMI0_AZ_FORMAT_WTRIG 0xEFFFFFFF
#define S_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) & 0x1) << 29)
#define G_007404_HDMI0_AZ_FORMAT_WTRIG_INT(x) (((x) >> 29) & 0x1)
#define C_007404_HDMI0_AZ_FORMAT_WTRIG_INT 0xDFFFFFFF
#define R_007408_HDMI0_AUDIO_PACKET_CONTROL 0x007408
#define S_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) & 0x1) << 28)
#define G_007408_HDMI0_AZ_FORMAT_WTRIG_MASK(x) (((x) >> 28) & 0x1)
#define C_007408_HDMI0_AZ_FORMAT_WTRIG_MASK 0xEFFFFFFF
#define S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) & 0x1) << 29)
#define G_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(x) (((x) >> 29) & 0x1)
#define C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK 0xDFFFFFFF
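The new defines follow the header's S_/G_/C_ convention: for a field F, S_F(x) shifts a value into position, G_F(v) extracts the field from a register value, and C_F is the complement mask that clears it. Acknowledging the HDMI0 write trigger, as rs600_irq_ack() now does, is then a plain read-modify-write (shown here with an explicit clear step for illustration):

u32 tmp = RREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL);
tmp &= C_007408_HDMI0_AZ_FORMAT_WTRIG_ACK;	/* clear the ack field */
tmp |= S_007408_HDMI0_AZ_FORMAT_WTRIG_ACK(1);	/* raise the ack bit */
WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, tmp);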
 
/* MC registers */
#define R_000000_MC_STATUS 0x000000
/drivers/video/drm/radeon/rs690.c
25,13 → 25,13
* Alex Deucher
* Jerome Glisse
*/
#include "drmP.h"
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"
 
static int rs690_mc_wait_for_idle(struct radeon_device *rdev)
int rs690_mc_wait_for_idle(struct radeon_device *rdev)
{
unsigned i;
uint32_t tmp;
145,7 → 145,7
rdev->pm.sideport_bandwidth.full = dfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
 
void rs690_mc_init(struct radeon_device *rdev)
static void rs690_mc_init(struct radeon_device *rdev)
{
u64 base;
 
224,7 → 224,7
fixed20_12 sclk;
};
 
void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
struct radeon_crtc *crtc,
struct rs690_watermark *wm)
{
581,7 → 581,7
WREG32(R_000078_MC_INDEX, 0x7F);
}
 
void rs690_mc_program(struct radeon_device *rdev)
static void rs690_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
 
630,11 → 630,14
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
 
return 0;
}
 
698,6 → 701,7
if (r)
return r;
rs600_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
/drivers/video/drm/radeon/rv515.c
27,7 → 27,7
*/
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include <drm/drmP.h>
#include "rv515d.h"
#include "radeon.h"
#include "radeon_asic.h"
35,9 → 35,9
#include "rv515_reg_safe.h"
 
/* This file gathers functions specific to rv515 */
int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
void rv515_gpu_init(struct radeon_device *rdev);
static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
static int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
static void rv515_gpu_init(struct radeon_device *rdev);
int rv515_mc_wait_for_idle(struct radeon_device *rdev);
 
void rv515_debugfs(struct radeon_device *rdev)
53,46 → 53,46
}
}
 
void rv515_ring_start(struct radeon_device *rdev)
void rv515_ring_start(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
 
r = radeon_ring_lock(rdev, 64);
r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
radeon_ring_write(ring,
ISYNC_ANY2D_IDLE3D |
ISYNC_ANY3D_IDLE2D |
ISYNC_WAIT_IDLEGUI |
ISYNC_CPSCRATCH_IDLEGUI);
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
radeon_ring_write(ring,
((6 << MS_X0_SHIFT) |
(6 << MS_Y0_SHIFT) |
(6 << MS_X1_SHIFT) |
101,8 → 101,8
(6 << MS_Y2_SHIFT) |
(6 << MSBD0_Y_SHIFT) |
(6 << MSBD0_X_SHIFT)));
radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
radeon_ring_write(rdev,
radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
radeon_ring_write(ring,
((6 << MS_X3_SHIFT) |
(6 << MS_Y3_SHIFT) |
(6 << MS_X4_SHIFT) |
110,15 → 110,15
(6 << MS_X5_SHIFT) |
(6 << MS_Y5_SHIFT) |
(6 << MSBD1_SHIFT)));
radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(rdev, PACKET0(0x20C8, 0));
radeon_ring_write(rdev, 0);
radeon_ring_unlock_commit(rdev);
radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
radeon_ring_write(ring, PACKET0(0x20C8, 0));
radeon_ring_write(ring, 0);
radeon_ring_unlock_commit(rdev, ring);
}
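The wholesale rewrite of rv515_ring_start() is mechanical: every emit now targets an explicit struct radeon_ring instead of the device, matching the multi-ring rework visible throughout this revision. The resulting calling pattern, sketched against the GFX ring (the rdev->ring[] array is assumed from the Linux rework this tracks):

struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r = radeon_ring_lock(rdev, ring, 4);	/* reserve 4 dwords */
if (r)
	return;
radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
radeon_ring_unlock_commit(rdev, ring);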
 
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
143,13 → 143,13
RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
}
 
void rv515_gpu_init(struct radeon_device *rdev)
static void rv515_gpu_init(struct radeon_device *rdev)
{
unsigned pipe_select_current, gb_pipe_select, tmp;
 
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
"resetting GPU. Bad things might happen.\n");
}
rv515_vga_render_disable(rdev);
r420_pipes_init(rdev);
161,7 → 161,7
WREG32_PLL(0x000D, tmp);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"reseting GPU. Bad things might happen.\n");
"resetting GPU. Bad things might happen.\n");
}
if (rv515_mc_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait MC idle while "
189,7 → 189,7
}
}
 
void rv515_mc_init(struct radeon_device *rdev)
static void rv515_mc_init(struct radeon_device *rdev)
{
 
rv515_vram_get_type(rdev);
261,7 → 261,7
};
#endif
 
int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
static int rv515_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rv515_pipes_info_list, 1);
270,7 → 270,7
#endif
}
 
int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
static int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, rv515_ga_info_list, 1);
281,12 → 281,8
 
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
 
/* Stop all video */
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
311,19 → 307,10
/* Unlock host access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
/* Restore video state */
WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
 
void rv515_mc_program(struct radeon_device *rdev)
static void rv515_mc_program(struct radeon_device *rdev)
{
struct rv515_mc_save save;
 
401,11 → 388,13
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
r = r100_ib_init(rdev);
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
return 0;
}
 
477,6 → 466,7
if (r)
return r;
rv515_set_safe_registers(rdev);
 
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
735,7 → 725,7
fixed20_12 sclk;
};
 
void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
static void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
struct radeon_crtc *crtc,
struct rv515_watermark *wm)
{
/drivers/video/drm/radeon/rv770.c
28,10 → 28,10
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include "drmP.h"
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include <drm/radeon_drm.h>
#include "rv770d.h"
#include "atom.h"
#include "avivod.h"
47,12 → 47,12
/*
* GART
*/
int rv770_pcie_gart_enable(struct radeon_device *rdev)
static int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
u32 tmp;
int r, i;
 
if (rdev->gart.table.vram.robj == NULL) {
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
74,6 → 74,8
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
if (rdev->family == CHIP_RV740)
WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
89,14 → 91,17
WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
 
r600_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
void rv770_pcie_gart_disable(struct radeon_device *rdev)
static void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
u32 tmp;
int i, r;
int i;
 
/* Disable all tables */
for (i = 0; i < 7; i++)
116,17 → 121,10
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
if (rdev->gart.table.vram.robj) {
r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->gart.table.vram.robj);
radeon_bo_unpin(rdev->gart.table.vram.robj);
radeon_bo_unreserve(rdev->gart.table.vram.robj);
radeon_gart_table_vram_unpin(rdev);
}
}
}
 
void rv770_pcie_gart_fini(struct radeon_device *rdev)
static void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
radeon_gart_fini(rdev);
rv770_pcie_gart_disable(rdev);
134,7 → 132,7
}
 
 
void rv770_agp_enable(struct radeon_device *rdev)
static void rv770_agp_enable(struct radeon_device *rdev)
{
u32 tmp;
int i;
207,7 → 205,7
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
}
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
285,229 → 283,6
/*
* Core functions
*/
static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
u32 enabled_backends_count;
u32 cur_pipe;
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
bool force_no_swizzle;
 
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
if (num_tile_pipes < 1)
num_tile_pipes = 1;
if (num_backends > R7XX_MAX_BACKENDS)
num_backends = R7XX_MAX_BACKENDS;
if (num_backends < 1)
num_backends = 1;
 
enabled_backends_mask = 0;
enabled_backends_count = 0;
for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
if (((backend_disable_mask >> i) & 1) == 0) {
enabled_backends_mask |= (1 << i);
++enabled_backends_count;
}
if (enabled_backends_count == num_backends)
break;
}
 
if (enabled_backends_count == 0) {
enabled_backends_mask = 1;
enabled_backends_count = 1;
}
 
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
 
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
 
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
swizzle_pipe[0] = 0;
break;
case 2:
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
break;
case 3:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 1;
}
break;
case 4:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 3;
swizzle_pipe[3] = 1;
}
break;
case 5:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 1;
swizzle_pipe[4] = 3;
}
break;
case 6:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 5;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
}
break;
case 7:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 5;
}
break;
case 8:
if (force_no_swizzle) {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 1;
swizzle_pipe[2] = 2;
swizzle_pipe[3] = 3;
swizzle_pipe[4] = 4;
swizzle_pipe[5] = 5;
swizzle_pipe[6] = 6;
swizzle_pipe[7] = 7;
} else {
swizzle_pipe[0] = 0;
swizzle_pipe[1] = 2;
swizzle_pipe[2] = 4;
swizzle_pipe[3] = 6;
swizzle_pipe[4] = 3;
swizzle_pipe[5] = 1;
swizzle_pipe[6] = 7;
swizzle_pipe[7] = 5;
}
break;
}
 
cur_backend = 0;
for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
while (((1 << cur_backend) & enabled_backends_mask) == 0)
cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
 
backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
 
cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
}
 
return backend_map;
}
 
static void rv770_program_channel_remap(struct radeon_device *rdev)
{
u32 tcp_chan_steer, mc_shared_chremap, tmp;
bool force_no_swizzle;
 
switch (rdev->family) {
case CHIP_RV770:
case CHIP_RV730:
force_no_swizzle = false;
break;
case CHIP_RV710:
case CHIP_RV740:
default:
force_no_swizzle = true;
break;
}
 
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
case 1:
default:
/* default mapping */
mc_shared_chremap = 0x00fac688;
break;
case 2:
case 3:
if (force_no_swizzle)
mc_shared_chremap = 0x00fac688;
else
mc_shared_chremap = 0x00bbc298;
break;
}
 
if (rdev->family == CHIP_RV740)
tcp_chan_steer = 0x00ef2a60;
else
tcp_chan_steer = 0x00fac688;
 
/* RV770 CE has special chremap setup */
if (rdev->pdev->device == 0x944e) {
tcp_chan_steer = 0x00b08b08;
mc_shared_chremap = 0x00b08b08;
}
 
WREG32(TCP_CHAN_STEER, tcp_chan_steer);
WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
}
 
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
523,14 → 298,17
u32 sq_thread_resource_mgmt;
u32 hdp_host_path_cntl;
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
u32 cc_rb_backend_disable = 0;
u32 cc_gc_shader_pipe_config = 0;
u32 mc_arb_ramcfg;
u32 db_debug4;
u32 db_debug4, tmp;
u32 inactive_pipes, shader_pipe_config;
u32 disabled_rb_mask;
unsigned active_number;
 
/* setup chip specs */
rdev->config.rv770.tiling_group_size = 256;
switch (rdev->family) {
case CHIP_RV770:
rdev->config.rv770.max_pipes = 4;
641,33 → 419,70
/* setup tiling, simd, pipe config */
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
if (!(inactive_pipes & tmp)) {
active_number++;
}
tmp <<= 1;
}
if (active_number == 1) {
WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
} else {
WREG32(SPI_CONFIG_CNTL, 0);
}
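The counting loop above is a population count over the active-pipe bits; with the r600_count_pipe_bits() helper already used a few lines below, it collapses to one expression (assuming the field holds exactly R7XX_MAX_PIPES meaningful bits):

active_number = R7XX_MAX_PIPES -
		r600_count_pipe_bits(inactive_pipes & ((1 << R7XX_MAX_PIPES) - 1));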
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
if (tmp < rdev->config.rv770.max_backends) {
rdev->config.rv770.max_backends = tmp;
}
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
if (tmp < rdev->config.rv770.max_pipes) {
rdev->config.rv770.max_pipes = tmp;
}
tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
if (tmp < rdev->config.rv770.max_simds) {
rdev->config.rv770.max_simds = tmp;
}
 
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
default:
gb_tiling_config |= PIPE_TILING(0);
gb_tiling_config = PIPE_TILING(0);
break;
case 2:
gb_tiling_config |= PIPE_TILING(1);
gb_tiling_config = PIPE_TILING(1);
break;
case 4:
gb_tiling_config |= PIPE_TILING(2);
gb_tiling_config = PIPE_TILING(2);
break;
case 8:
gb_tiling_config |= PIPE_TILING(3);
gb_tiling_config = PIPE_TILING(3);
break;
}
rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
 
disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
R7XX_MAX_BACKENDS, disabled_rb_mask);
gb_tiling_config |= tmp << 16;
rdev->config.rv770.backend_map = tmp;
 
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else {
if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
gb_tiling_config |= BANK_TILING(1);
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
gb_tiling_config |= BANK_TILING(0);
}
rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
rdev->config.rv770.tiling_group_size = 512;
else
rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
gb_tiling_config |= SAMPLE_SPLIT(3);
679,49 → 494,19
}
 
gb_tiling_config |= BANK_SWAPS(1);
 
cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
cc_rb_backend_disable |=
BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
 
cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
 
if (rdev->family == CHIP_RV740)
backend_map = 0x28;
else
backend_map = r700_get_tile_pipe_to_backend_map(rdev,
rdev->config.rv770.max_tile_pipes,
(R7XX_MAX_BACKENDS -
r600_count_pipe_bits((cc_rb_backend_disable &
R7XX_MAX_BACKENDS_MASK) >> 16)),
(cc_rb_backend_disable >> 16));
 
rdev->config.rv770.tile_config = gb_tiling_config;
rdev->config.rv770.backend_map = backend_map;
gb_tiling_config |= BACKEND_MAP(backend_map);
 
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
 
rv770_program_channel_remap(rdev);
 
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
 
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
WREG32(CGTS_USER_TCC_DISABLE, 0);
 
num_qd_pipes =
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
 
num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
 
749,6 → 534,9
ACK_FLUSH_CTL(3) |
SYNC_FLUSH_CTL));
 
if (rdev->family != CHIP_RV770)
WREG32(SMX_SAR_CTL0, 0x00003f3f);
 
db_debug3 = RREG32(DB_DEBUG3);
db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
switch (rdev->family) {
782,8 → 570,6
 
WREG32(VGT_NUM_INSTANCES, 1);
 
WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
 
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
 
WREG32(CP_PERFMON_CNTL, 0);
927,57 → 713,9
 
WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
NUM_CLIP_SEQ(3)));
 
WREG32(VC_ENHANCE, 0);
}
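
/* Rough sketch of the gb_tiling_config word assembled above, assuming the
 * usual r7xx field shifts (PIPE_TILING at bit 1 and BACKEND_MAP at bit 16
 * are given in rv770d.h; BANK_TILING at bit 4, GROUP_SIZE at bit 6 and
 * ROW_TILING at bit 8 follow the r6xx layout): e.g. 4 pipes, 8 banks and a
 * 512-byte group would encode as PIPE_TILING(2) | BANK_TILING(1) |
 * GROUP_SIZE(1), and tiling_nbanks above decodes the bank field back as
 * 4 << field. */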
 
static int rv770_vram_scratch_init(struct radeon_device *rdev)
{
int r;
u64 gpu_addr;
 
if (rdev->vram_scratch.robj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
&rdev->vram_scratch.robj);
if (r) {
return r;
}
}
 
r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
if (unlikely(r != 0))
return r;
r = radeon_bo_pin(rdev->vram_scratch.robj,
RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
if (r) {
radeon_bo_unreserve(rdev->vram_scratch.robj);
return r;
}
r = radeon_bo_kmap(rdev->vram_scratch.robj,
(void **)&rdev->vram_scratch.ptr);
if (r)
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
 
return r;
}
 
static void rv770_vram_scratch_fini(struct radeon_device *rdev)
{
int r;
 
if (rdev->vram_scratch.robj == NULL) {
return;
}
r = radeon_bo_reserve(rdev->vram_scratch.robj, false);
if (likely(r == 0)) {
radeon_bo_kunmap(rdev->vram_scratch.robj);
radeon_bo_unpin(rdev->vram_scratch.robj);
radeon_bo_unreserve(rdev->vram_scratch.robj);
}
radeon_bo_unref(&rdev->vram_scratch.robj);
}
 
void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_bf, size_af;
990,7 → 728,7
}
if (rdev->flags & RADEON_IS_AGP) {
size_bf = mc->gtt_start;
size_af = 0xFFFFFFFF - mc->gtt_end + 1;
size_af = 0xFFFFFFFF - mc->gtt_end;
if (size_bf > size_af) {
if (mc->mc_vram_size > size_bf) {
dev_warn(rdev->dev, "limiting VRAM\n");
1004,7 → 742,7
mc->real_vram_size = size_af;
mc->mc_vram_size = size_af;
}
mc->vram_start = mc->gtt_end;
mc->vram_start = mc->gtt_end + 1;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
1017,7 → 755,7
}
}
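
/* Placement sketch: size_bf is the free space below gtt_start and size_af
 * the space above gtt_end; the larger span wins and VRAM is clamped to fit
 * it. When the space above is chosen, vram_start lands at gtt_end + 1; the
 * before/after pair above shows this revision fixing the off-by-one in both
 * size_af and vram_start. */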
 
int rv770_mc_init(struct radeon_device *rdev)
static int rv770_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
1064,6 → 802,7
 
static int rv770_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
/* enable pcie gen2 link */
1077,6 → 816,10
}
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
rv770_mc_program(rdev);
if (rdev->flags & RADEON_IS_AGP) {
rv770_agp_enable(rdev);
1085,23 → 828,21
if (r)
return r;
}
r = rv770_vram_scratch_init(rdev);
if (r)
return r;
 
rv770_gpu_init(rdev);
r = r600_blit_init(rdev);
if (r) {
// r600_blit_fini(rdev);
rdev->asic->copy = NULL;
r600_blit_fini(rdev);
rdev->asic->copy.copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
 
r = r600_video_init(rdev);
if (r) {
// r = r600_video_init(rdev);
// if (r) {
// r600_video_fini(rdev);
// rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed video blitter (%d) falling back to memcpy\n", r);
}
// dev_warn(rdev->dev, "failed video blitter (%d) falling back to memcpy\n", r);
// }
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
1117,7 → 858,9
}
r600_irq_set(rdev);
 
r = radeon_ring_init(rdev, rdev->cp.ring_size);
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
R600_CP_RB_RPTR, R600_CP_RB_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = rv770_cp_load_microcode(rdev);
1127,6 → 870,13
if (r)
return r;
 
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
return r;
}
 
 
return 0;
}
 
1146,10 → 896,6
{
int r;
 
/* This doesn't do much */
r = radeon_gem_init(rdev);
if (r)
return r;
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
1200,8 → 946,8
if (r)
return r;
 
rdev->cp.ring_obj = NULL;
r600_ring_init(rdev, 1024 * 1024);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
1217,19 → 963,6
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
if (rdev->accel_working) {
r = radeon_ib_pool_init(rdev);
if (r) {
dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
rdev->accel_working = false;
} else {
r = r600_ib_test(rdev);
if (r) {
dev_err(rdev->dev, "IB test failed (%d).\n", r);
rdev->accel_working = false;
}
}
}
 
return 0;
}
1238,6 → 971,8
{
u32 link_width_cntl, lanes, speed_cntl, tmp;
u16 link_cntl2;
u32 mask;
int ret;
 
if (radeon_pcie_gen2 == 0)
return;
1252,6 → 987,15
if (ASIC_IS_X2(rdev))
return;
 
ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
if (ret != 0)
return;
 
if (!(mask & DRM_PCIE_SPEED_50))
return;
 
DRM_INFO("enabling PCIE gen 2 link speeds, disable with radeon.pcie_gen2=0\n");
 
/* advertise upconfig capability */
link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
link_width_cntl &= ~LC_UPCONFIGURE_DIS;
/drivers/video/drm/radeon/rv770d.h
106,10 → 106,13
#define BACKEND_MAP(x) ((x) << 16)
 
#define GB_TILING_CONFIG 0x98F0
#define PIPE_TILING__SHIFT 1
#define PIPE_TILING__MASK 0x0000000e
 
#define GC_USER_SHADER_PIPE_CONFIG 0x8954
#define INACTIVE_QD_PIPES(x) ((x) << 8)
#define INACTIVE_QD_PIPES_MASK 0x0000FF00
#define INACTIVE_QD_PIPES_SHIFT 8
#define INACTIVE_SIMDS(x) ((x) << 16)
#define INACTIVE_SIMDS_MASK 0x00FF0000
 
174,6 → 177,7
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define MC_VM_MD_L1_TLB3_CNTL 0x2698
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
207,6 → 211,7
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
 
#define SMX_SAR_CTL0 0xA008
#define SMX_DC_CTL0 0xA020
#define USE_HASH_FUNCTION (1 << 0)
#define CACHE_DEPTH(x) ((x) << 1)
306,6 → 311,8
#define TCP_CNTL 0x9610
#define TCP_CHAN_STEER 0x9614
 
#define VC_ENHANCE 0x9714
 
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x)<<0)
#define VC_ONLY 0
353,6 → 360,197
 
#define SRBM_STATUS 0x0E50
 
/* DCE 3.2 HDMI */
#define HDMI_CONTROL 0x7400
# define HDMI_KEEPOUT_MODE (1 << 0)
# define HDMI_PACKET_GEN_VERSION (1 << 4) /* 0 = r6xx compat */
# define HDMI_ERROR_ACK (1 << 8)
# define HDMI_ERROR_MASK (1 << 9)
#define HDMI_STATUS 0x7404
# define HDMI_ACTIVE_AVMUTE (1 << 0)
# define HDMI_AUDIO_PACKET_ERROR (1 << 16)
# define HDMI_VBI_PACKET_ERROR (1 << 20)
#define HDMI_AUDIO_PACKET_CONTROL 0x7408
# define HDMI_AUDIO_DELAY_EN(x) (((x) & 3) << 4)
# define HDMI_AUDIO_PACKETS_PER_LINE(x) (((x) & 0x1f) << 16)
#define HDMI_ACR_PACKET_CONTROL 0x740c
# define HDMI_ACR_SEND (1 << 0)
# define HDMI_ACR_CONT (1 << 1)
# define HDMI_ACR_SELECT(x) (((x) & 3) << 4)
# define HDMI_ACR_HW 0
# define HDMI_ACR_32 1
# define HDMI_ACR_44 2
# define HDMI_ACR_48 3
# define HDMI_ACR_SOURCE (1 << 8) /* 0 - hw; 1 - cts value */
# define HDMI_ACR_AUTO_SEND (1 << 12)
#define HDMI_VBI_PACKET_CONTROL 0x7410
# define HDMI_NULL_SEND (1 << 0)
# define HDMI_GC_SEND (1 << 4)
# define HDMI_GC_CONT (1 << 5) /* 0 - once; 1 - every frame */
#define HDMI_INFOFRAME_CONTROL0 0x7414
# define HDMI_AVI_INFO_SEND (1 << 0)
# define HDMI_AVI_INFO_CONT (1 << 1)
# define HDMI_AUDIO_INFO_SEND (1 << 4)
# define HDMI_AUDIO_INFO_CONT (1 << 5)
# define HDMI_MPEG_INFO_SEND (1 << 8)
# define HDMI_MPEG_INFO_CONT (1 << 9)
#define HDMI_INFOFRAME_CONTROL1 0x7418
# define HDMI_AVI_INFO_LINE(x) (((x) & 0x3f) << 0)
# define HDMI_AUDIO_INFO_LINE(x) (((x) & 0x3f) << 8)
# define HDMI_MPEG_INFO_LINE(x) (((x) & 0x3f) << 16)
#define HDMI_GENERIC_PACKET_CONTROL 0x741c
# define HDMI_GENERIC0_SEND (1 << 0)
# define HDMI_GENERIC0_CONT (1 << 1)
# define HDMI_GENERIC1_SEND (1 << 4)
# define HDMI_GENERIC1_CONT (1 << 5)
# define HDMI_GENERIC0_LINE(x) (((x) & 0x3f) << 16)
# define HDMI_GENERIC1_LINE(x) (((x) & 0x3f) << 24)
#define HDMI_GC 0x7428
# define HDMI_GC_AVMUTE (1 << 0)
#define AFMT_AUDIO_PACKET_CONTROL2 0x742c
# define AFMT_AUDIO_LAYOUT_OVRD (1 << 0)
# define AFMT_AUDIO_LAYOUT_SELECT (1 << 1)
# define AFMT_60958_CS_SOURCE (1 << 4)
# define AFMT_AUDIO_CHANNEL_ENABLE(x) (((x) & 0xff) << 8)
# define AFMT_DP_AUDIO_STREAM_ID(x) (((x) & 0xff) << 16)
#define AFMT_AVI_INFO0 0x7454
# define AFMT_AVI_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_AVI_INFO_S(x) (((x) & 3) << 8)
# define AFMT_AVI_INFO_B(x) (((x) & 3) << 10)
# define AFMT_AVI_INFO_A(x) (((x) & 1) << 12)
# define AFMT_AVI_INFO_Y(x) (((x) & 3) << 13)
# define AFMT_AVI_INFO_Y_RGB 0
# define AFMT_AVI_INFO_Y_YCBCR422 1
# define AFMT_AVI_INFO_Y_YCBCR444 2
# define AFMT_AVI_INFO_Y_A_B_S(x) (((x) & 0xff) << 8)
# define AFMT_AVI_INFO_R(x) (((x) & 0xf) << 16)
# define AFMT_AVI_INFO_M(x) (((x) & 0x3) << 20)
# define AFMT_AVI_INFO_C(x) (((x) & 0x3) << 22)
# define AFMT_AVI_INFO_C_M_R(x) (((x) & 0xff) << 16)
# define AFMT_AVI_INFO_SC(x) (((x) & 0x3) << 24)
# define AFMT_AVI_INFO_Q(x) (((x) & 0x3) << 26)
# define AFMT_AVI_INFO_EC(x) (((x) & 0x3) << 28)
# define AFMT_AVI_INFO_ITC(x) (((x) & 0x1) << 31)
# define AFMT_AVI_INFO_ITC_EC_Q_SC(x) (((x) & 0xff) << 24)
#define AFMT_AVI_INFO1 0x7458
# define AFMT_AVI_INFO_VIC(x) (((x) & 0x7f) << 0) /* don't use avi infoframe v1 */
# define AFMT_AVI_INFO_PR(x) (((x) & 0xf) << 8) /* don't use avi infoframe v1 */
# define AFMT_AVI_INFO_TOP(x) (((x) & 0xffff) << 16)
#define AFMT_AVI_INFO2 0x745c
# define AFMT_AVI_INFO_BOTTOM(x) (((x) & 0xffff) << 0)
# define AFMT_AVI_INFO_LEFT(x) (((x) & 0xffff) << 16)
#define AFMT_AVI_INFO3 0x7460
# define AFMT_AVI_INFO_RIGHT(x) (((x) & 0xffff) << 0)
# define AFMT_AVI_INFO_VERSION(x) (((x) & 3) << 24)
#define AFMT_MPEG_INFO0 0x7464
# define AFMT_MPEG_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_MPEG_INFO_MB0(x) (((x) & 0xff) << 8)
# define AFMT_MPEG_INFO_MB1(x) (((x) & 0xff) << 16)
# define AFMT_MPEG_INFO_MB2(x) (((x) & 0xff) << 24)
#define AFMT_MPEG_INFO1 0x7468
# define AFMT_MPEG_INFO_MB3(x) (((x) & 0xff) << 0)
# define AFMT_MPEG_INFO_MF(x) (((x) & 3) << 8)
# define AFMT_MPEG_INFO_FR(x) (((x) & 1) << 12)
#define AFMT_GENERIC0_HDR 0x746c
#define AFMT_GENERIC0_0 0x7470
#define AFMT_GENERIC0_1 0x7474
#define AFMT_GENERIC0_2 0x7478
#define AFMT_GENERIC0_3 0x747c
#define AFMT_GENERIC0_4 0x7480
#define AFMT_GENERIC0_5 0x7484
#define AFMT_GENERIC0_6 0x7488
#define AFMT_GENERIC1_HDR 0x748c
#define AFMT_GENERIC1_0 0x7490
#define AFMT_GENERIC1_1 0x7494
#define AFMT_GENERIC1_2 0x7498
#define AFMT_GENERIC1_3 0x749c
#define AFMT_GENERIC1_4 0x74a0
#define AFMT_GENERIC1_5 0x74a4
#define AFMT_GENERIC1_6 0x74a8
#define HDMI_ACR_32_0 0x74ac
# define HDMI_ACR_CTS_32(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_32_1 0x74b0
# define HDMI_ACR_N_32(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_44_0 0x74b4
# define HDMI_ACR_CTS_44(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_44_1 0x74b8
# define HDMI_ACR_N_44(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_48_0 0x74bc
# define HDMI_ACR_CTS_48(x) (((x) & 0xfffff) << 12)
#define HDMI_ACR_48_1 0x74c0
# define HDMI_ACR_N_48(x) (((x) & 0xfffff) << 0)
#define HDMI_ACR_STATUS_0 0x74c4
#define HDMI_ACR_STATUS_1 0x74c8
#define AFMT_AUDIO_INFO0 0x74cc
# define AFMT_AUDIO_INFO_CHECKSUM(x) (((x) & 0xff) << 0)
# define AFMT_AUDIO_INFO_CC(x) (((x) & 7) << 8)
# define AFMT_AUDIO_INFO_CHECKSUM_OFFSET(x) (((x) & 0xff) << 16)
#define AFMT_AUDIO_INFO1 0x74d0
# define AFMT_AUDIO_INFO_CA(x) (((x) & 0xff) << 0)
# define AFMT_AUDIO_INFO_LSV(x) (((x) & 0xf) << 11)
# define AFMT_AUDIO_INFO_DM_INH(x) (((x) & 1) << 15)
# define AFMT_AUDIO_INFO_DM_INH_LSV(x) (((x) & 0xff) << 8)
#define AFMT_60958_0 0x74d4
# define AFMT_60958_CS_A(x) (((x) & 1) << 0)
# define AFMT_60958_CS_B(x) (((x) & 1) << 1)
# define AFMT_60958_CS_C(x) (((x) & 1) << 2)
# define AFMT_60958_CS_D(x) (((x) & 3) << 3)
# define AFMT_60958_CS_MODE(x) (((x) & 3) << 6)
# define AFMT_60958_CS_CATEGORY_CODE(x) (((x) & 0xff) << 8)
# define AFMT_60958_CS_SOURCE_NUMBER(x) (((x) & 0xf) << 16)
# define AFMT_60958_CS_CHANNEL_NUMBER_L(x) (((x) & 0xf) << 20)
# define AFMT_60958_CS_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 24)
# define AFMT_60958_CS_CLOCK_ACCURACY(x) (((x) & 3) << 28)
#define AFMT_60958_1 0x74d8
# define AFMT_60958_CS_WORD_LENGTH(x) (((x) & 0xf) << 0)
# define AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY(x) (((x) & 0xf) << 4)
# define AFMT_60958_CS_VALID_L(x) (((x) & 1) << 16)
# define AFMT_60958_CS_VALID_R(x) (((x) & 1) << 18)
# define AFMT_60958_CS_CHANNEL_NUMBER_R(x) (((x) & 0xf) << 20)
#define AFMT_AUDIO_CRC_CONTROL 0x74dc
# define AFMT_AUDIO_CRC_EN (1 << 0)
#define AFMT_RAMP_CONTROL0 0x74e0
# define AFMT_RAMP_MAX_COUNT(x) (((x) & 0xffffff) << 0)
# define AFMT_RAMP_DATA_SIGN (1 << 31)
#define AFMT_RAMP_CONTROL1 0x74e4
# define AFMT_RAMP_MIN_COUNT(x) (((x) & 0xffffff) << 0)
# define AFMT_AUDIO_TEST_CH_DISABLE(x) (((x) & 0xff) << 24)
#define AFMT_RAMP_CONTROL2 0x74e8
# define AFMT_RAMP_INC_COUNT(x) (((x) & 0xffffff) << 0)
#define AFMT_RAMP_CONTROL3 0x74ec
# define AFMT_RAMP_DEC_COUNT(x) (((x) & 0xffffff) << 0)
#define AFMT_60958_2 0x74f0
# define AFMT_60958_CS_CHANNEL_NUMBER_2(x) (((x) & 0xf) << 0)
# define AFMT_60958_CS_CHANNEL_NUMBER_3(x) (((x) & 0xf) << 4)
# define AFMT_60958_CS_CHANNEL_NUMBER_4(x) (((x) & 0xf) << 8)
# define AFMT_60958_CS_CHANNEL_NUMBER_5(x) (((x) & 0xf) << 12)
# define AFMT_60958_CS_CHANNEL_NUMBER_6(x) (((x) & 0xf) << 16)
# define AFMT_60958_CS_CHANNEL_NUMBER_7(x) (((x) & 0xf) << 20)
#define AFMT_STATUS 0x7600
# define AFMT_AUDIO_ENABLE (1 << 4)
# define AFMT_AZ_FORMAT_WTRIG (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_INT (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG (1 << 30)
#define AFMT_AUDIO_PACKET_CONTROL 0x7604
# define AFMT_AUDIO_SAMPLE_SEND (1 << 0)
# define AFMT_AUDIO_TEST_EN (1 << 12)
# define AFMT_AUDIO_CHANNEL_SWAP (1 << 24)
# define AFMT_60958_CS_UPDATE (1 << 26)
# define AFMT_AZ_AUDIO_ENABLE_CHG_MASK (1 << 27)
# define AFMT_AZ_FORMAT_WTRIG_MASK (1 << 28)
# define AFMT_AZ_FORMAT_WTRIG_ACK (1 << 29)
# define AFMT_AZ_AUDIO_ENABLE_CHG_ACK (1 << 30)
#define AFMT_VBI_PACKET_CONTROL 0x7608
# define AFMT_GENERIC0_UPDATE (1 << 2)
#define AFMT_INFOFRAME_CONTROL0 0x760c
# define AFMT_AUDIO_INFO_SOURCE (1 << 6) /* 0 - sound block; 1 - hmdi regs */
# define AFMT_AUDIO_INFO_UPDATE (1 << 7)
# define AFMT_MPEG_INFO_UPDATE (1 << 10)
#define AFMT_GENERIC0_7 0x7610
/* second instance starts at 0x7800 */
#define HDMI_OFFSET0 (0x7400 - 0x7400)
#define HDMI_OFFSET1 (0x7800 - 0x7400)
 
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6914
#define D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x6114
/drivers/video/drm/radeon/si.c
0,0 → 1,3985
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include "radeon.h"
#include "radeon_asic.h"
#include <drm/radeon_drm.h>
#include "sid.h"
#include "atom.h"
#include "si_blit_shaders.h"
 
#define SI_PFP_UCODE_SIZE 2144
#define SI_PM4_UCODE_SIZE 2144
#define SI_CE_UCODE_SIZE 2144
#define SI_RLC_UCODE_SIZE 2048
#define SI_MC_UCODE_SIZE 7769
 
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
 
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
 
/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
{
u32 temp;
int actual_temp = 0;
 
temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
CTF_TEMP_SHIFT;
 
if (temp & 0x200)
actual_temp = 255;
else
actual_temp = temp & 0x1ff;
 
actual_temp = (actual_temp * 1000);
 
return actual_temp;
}
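
/* Worked example (raw value assumed for illustration): a CTF_TEMP field of
 * 0x40 has bit 9 clear, so actual_temp = 64 and the function returns
 * 64 * 1000 = 64000 millidegrees (64 C); any reading with bit 9 set
 * saturates to 255 C. */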
 
#define TAHITI_IO_MC_REGS_SIZE 36
 
static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
{0x0000006f, 0x03044000},
{0x00000070, 0x0480c018},
{0x00000071, 0x00000040},
{0x00000072, 0x01000000},
{0x00000074, 0x000000ff},
{0x00000075, 0x00143400},
{0x00000076, 0x08ec0800},
{0x00000077, 0x040000cc},
{0x00000079, 0x00000000},
{0x0000007a, 0x21000409},
{0x0000007c, 0x00000000},
{0x0000007d, 0xe8000000},
{0x0000007e, 0x044408a8},
{0x0000007f, 0x00000003},
{0x00000080, 0x00000000},
{0x00000081, 0x01000000},
{0x00000082, 0x02000000},
{0x00000083, 0x00000000},
{0x00000084, 0xe3f3e4f4},
{0x00000085, 0x00052024},
{0x00000087, 0x00000000},
{0x00000088, 0x66036603},
{0x00000089, 0x01000000},
{0x0000008b, 0x1c0a0000},
{0x0000008c, 0xff010000},
{0x0000008e, 0xffffefff},
{0x0000008f, 0xfff3efff},
{0x00000090, 0xfff3efbf},
{0x00000094, 0x00101101},
{0x00000095, 0x00000fff},
{0x00000096, 0x00116fff},
{0x00000097, 0x60010000},
{0x00000098, 0x10010000},
{0x00000099, 0x00006000},
{0x0000009a, 0x00001000},
{0x0000009f, 0x00a77400}
};
 
static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
{0x0000006f, 0x03044000},
{0x00000070, 0x0480c018},
{0x00000071, 0x00000040},
{0x00000072, 0x01000000},
{0x00000074, 0x000000ff},
{0x00000075, 0x00143400},
{0x00000076, 0x08ec0800},
{0x00000077, 0x040000cc},
{0x00000079, 0x00000000},
{0x0000007a, 0x21000409},
{0x0000007c, 0x00000000},
{0x0000007d, 0xe8000000},
{0x0000007e, 0x044408a8},
{0x0000007f, 0x00000003},
{0x00000080, 0x00000000},
{0x00000081, 0x01000000},
{0x00000082, 0x02000000},
{0x00000083, 0x00000000},
{0x00000084, 0xe3f3e4f4},
{0x00000085, 0x00052024},
{0x00000087, 0x00000000},
{0x00000088, 0x66036603},
{0x00000089, 0x01000000},
{0x0000008b, 0x1c0a0000},
{0x0000008c, 0xff010000},
{0x0000008e, 0xffffefff},
{0x0000008f, 0xfff3efff},
{0x00000090, 0xfff3efbf},
{0x00000094, 0x00101101},
{0x00000095, 0x00000fff},
{0x00000096, 0x00116fff},
{0x00000097, 0x60010000},
{0x00000098, 0x10010000},
{0x00000099, 0x00006000},
{0x0000009a, 0x00001000},
{0x0000009f, 0x00a47400}
};
 
static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
{0x0000006f, 0x03044000},
{0x00000070, 0x0480c018},
{0x00000071, 0x00000040},
{0x00000072, 0x01000000},
{0x00000074, 0x000000ff},
{0x00000075, 0x00143400},
{0x00000076, 0x08ec0800},
{0x00000077, 0x040000cc},
{0x00000079, 0x00000000},
{0x0000007a, 0x21000409},
{0x0000007c, 0x00000000},
{0x0000007d, 0xe8000000},
{0x0000007e, 0x044408a8},
{0x0000007f, 0x00000003},
{0x00000080, 0x00000000},
{0x00000081, 0x01000000},
{0x00000082, 0x02000000},
{0x00000083, 0x00000000},
{0x00000084, 0xe3f3e4f4},
{0x00000085, 0x00052024},
{0x00000087, 0x00000000},
{0x00000088, 0x66036603},
{0x00000089, 0x01000000},
{0x0000008b, 0x1c0a0000},
{0x0000008c, 0xff010000},
{0x0000008e, 0xffffefff},
{0x0000008f, 0xfff3efff},
{0x00000090, 0xfff3efbf},
{0x00000094, 0x00101101},
{0x00000095, 0x00000fff},
{0x00000096, 0x00116fff},
{0x00000097, 0x60010000},
{0x00000098, 0x10010000},
{0x00000099, 0x00006000},
{0x0000009a, 0x00001000},
{0x0000009f, 0x00a37400}
};
 
/* ucode loading */
static int si_mc_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
u32 running, blackout = 0;
u32 *io_mc_regs;
int i, ucode_size, regs_size;
 
if (!rdev->mc_fw)
return -EINVAL;
 
switch (rdev->family) {
case CHIP_TAHITI:
io_mc_regs = (u32 *)&tahiti_io_mc_regs;
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_PITCAIRN:
io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
case CHIP_VERDE:
default:
io_mc_regs = (u32 *)&verde_io_mc_regs;
ucode_size = SI_MC_UCODE_SIZE;
regs_size = TAHITI_IO_MC_REGS_SIZE;
break;
}
 
running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
 
if (running == 0) {
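/* note: running is known to be 0 inside this branch, so the blackout
 * save below can never execute; it is kept only for symmetry with the
 * restore at the end of the branch. */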
if (running) {
blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);
}
 
/* reset the engine and set to writable */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
WREG32(MC_SEQ_SUP_CNTL, 0x00000010);
 
/* load mc io regs */
for (i = 0; i < regs_size; i++) {
WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
}
/* load the MC ucode */
fw_data = (const __be32 *)rdev->mc_fw->data;
for (i = 0; i < ucode_size; i++)
WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));
 
/* put the engine back into the active state */
WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
WREG32(MC_SEQ_SUP_CNTL, 0x00000001);
 
/* wait for training to complete */
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
break;
udelay(1);
}
for (i = 0; i < rdev->usec_timeout; i++) {
if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
break;
udelay(1);
}
 
if (running)
WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);
}
 
return 0;
}
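
/* A minimal usage sketch (mirroring the upstream startup path): the MC
 * microcode must be loaded after si_init_microcode() has fetched
 * rdev->mc_fw and before the memory controller is programmed:
 *
 *	r = si_mc_load_microcode(rdev);
 *	if (r) {
 *		DRM_ERROR("Failed to load MC firmware!\n");
 *		return r;
 *	}
 */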
 
static int si_init_microcode(struct radeon_device *rdev)
{
struct platform_device *pdev;
const char *chip_name;
const char *rlc_chip_name;
size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
char fw_name[30];
int err;
 
DRM_DEBUG("\n");
 
pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
err = IS_ERR(pdev);
if (err) {
printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
return -EINVAL;
}
 
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
rlc_chip_name = "TAHITI";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
rlc_chip_name = "PITCAIRN";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
break;
case CHIP_VERDE:
chip_name = "VERDE";
rlc_chip_name = "VERDE";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
ce_req_size = SI_CE_UCODE_SIZE * 4;
rlc_req_size = SI_RLC_UCODE_SIZE * 4;
mc_req_size = SI_MC_UCODE_SIZE * 4;
break;
default: BUG();
}
 
DRM_INFO("Loading %s Microcode\n", chip_name);
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->pfp_fw->size != pfp_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->pfp_fw->size, fw_name);
err = -EINVAL;
goto out;
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->me_fw->size != me_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->me_fw->size, fw_name);
err = -EINVAL;
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->ce_fw->size != ce_req_size) {
printk(KERN_ERR
"si_cp: Bogus length %zu in firmware \"%s\"\n",
rdev->ce_fw->size, fw_name);
err = -EINVAL;
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->rlc_fw->size != rlc_req_size) {
printk(KERN_ERR
"si_rlc: Bogus length %zu in firmware \"%s\"\n",
rdev->rlc_fw->size, fw_name);
err = -EINVAL;
}
 
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
if (err)
goto out;
if (rdev->mc_fw->size != mc_req_size) {
printk(KERN_ERR
"si_mc: Bogus length %zu in firmware \"%s\"\n",
rdev->mc_fw->size, fw_name);
err = -EINVAL;
}
 
out:
platform_device_unregister(pdev);
 
if (err) {
if (err != -EINVAL)
printk(KERN_ERR
"si_cp: Failed to load firmware \"%s\"\n",
fw_name);
release_firmware(rdev->pfp_fw);
rdev->pfp_fw = NULL;
release_firmware(rdev->me_fw);
rdev->me_fw = NULL;
release_firmware(rdev->ce_fw);
rdev->ce_fw = NULL;
release_firmware(rdev->rlc_fw);
rdev->rlc_fw = NULL;
release_firmware(rdev->mc_fw);
rdev->mc_fw = NULL;
}
return err;
}
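
/* Size sanity: the *_UCODE_SIZE constants are in 32-bit words, so each
 * expected blob length above is the word count * 4, e.g. SI_MC_UCODE_SIZE
 * = 7769 words -> 31076 bytes for TAHITI_mc.bin; a mismatch marks the blob
 * as bogus instead of being silently truncated. */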
 
/* watermark setup */
static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
struct radeon_crtc *radeon_crtc,
struct drm_display_mode *mode,
struct drm_display_mode *other_mode)
{
u32 tmp;
/*
* Line Buffer Setup
* There are 3 line buffers, each one shared by 2 display controllers.
* DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
* the display controllers. The partitioning is done via one of four
* preset allocations specified in bits 21:20:
* 0 - half lb
* 2 - whole lb, other crtc must be disabled
*/
/* this can get tricky if we have two large displays on a paired group
* of crtcs. Ideally for multiple large displays we'd assign them to
* non-linked crtcs for maximum line buffer allocation.
*/
if (radeon_crtc->base.enabled && mode) {
if (other_mode)
tmp = 0; /* 1/2 */
else
tmp = 2; /* whole */
} else
tmp = 0;
 
WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
DC_LB_MEMORY_CONFIG(tmp));
 
if (radeon_crtc->base.enabled && mode) {
switch (tmp) {
case 0:
default:
return 4096 * 2;
case 2:
return 8192 * 2;
}
}
 
/* controller not enabled, so no lb used */
return 0;
}
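
/* Illustration: when both crtcs of a pair are active each gets the half
 * allocation (4096 * 2 entries); with the partner disabled the whole
 * buffer (8192 * 2) is claimed, doubling the lb_size later handed to
 * dce6_program_watermarks(). */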
 
static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
{
u32 tmp = RREG32(MC_SHARED_CHMAP);
 
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
default:
return 1;
case 1:
return 2;
case 2:
return 4;
case 3:
return 8;
case 4:
return 3;
case 5:
return 6;
case 6:
return 10;
case 7:
return 12;
case 8:
return 16;
}
}
 
struct dce6_wm_params {
u32 dram_channels; /* number of dram channels */
u32 yclk; /* bandwidth per dram data pin in kHz */
u32 sclk; /* engine clock in kHz */
u32 disp_clk; /* display clock in kHz */
u32 src_width; /* viewport width */
u32 active_time; /* active display time in ns */
u32 blank_time; /* blank time in ns */
bool interlaced; /* mode is interlaced */
fixed20_12 vsc; /* vertical scale ratio */
u32 num_heads; /* number of active crtcs */
u32 bytes_per_pixel; /* bytes per pixel display + overlay */
u32 lb_size; /* line buffer allocated to pipe */
u32 vtaps; /* vertical scaler taps */
};
 
static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate raw DRAM Bandwidth */
fixed20_12 dram_efficiency; /* 0.7 */
fixed20_12 yclk, dram_channels, bandwidth;
fixed20_12 a;
 
a.full = dfixed_const(1000);
yclk.full = dfixed_const(wm->yclk);
yclk.full = dfixed_div(yclk, a);
dram_channels.full = dfixed_const(wm->dram_channels * 4);
a.full = dfixed_const(10);
dram_efficiency.full = dfixed_const(7);
dram_efficiency.full = dfixed_div(dram_efficiency, a);
bandwidth.full = dfixed_mul(dram_channels, yclk);
bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
 
return dfixed_trunc(bandwidth);
}
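
/* Worked example (numbers assumed): wm->yclk = 800000 kHz and two dram
 * channels give yclk = 800 and dram_channels = 8 (4 bytes per channel),
 * so the raw bandwidth is 800 * 8 * 0.7 = 4480 MB/s after the 0.7
 * efficiency derating. */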
 
static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
/* Calculate DRAM Bandwidth and the part allocated to display. */
fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
fixed20_12 yclk, dram_channels, bandwidth;
fixed20_12 a;
 
a.full = dfixed_const(1000);
yclk.full = dfixed_const(wm->yclk);
yclk.full = dfixed_div(yclk, a);
dram_channels.full = dfixed_const(wm->dram_channels * 4);
a.full = dfixed_const(10);
disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */
disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
bandwidth.full = dfixed_mul(dram_channels, yclk);
bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
 
return dfixed_trunc(bandwidth);
}
 
static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate the display Data return Bandwidth */
fixed20_12 return_efficiency; /* 0.8 */
fixed20_12 sclk, bandwidth;
fixed20_12 a;
 
a.full = dfixed_const(1000);
sclk.full = dfixed_const(wm->sclk);
sclk.full = dfixed_div(sclk, a);
a.full = dfixed_const(10);
return_efficiency.full = dfixed_const(8);
return_efficiency.full = dfixed_div(return_efficiency, a);
a.full = dfixed_const(32);
bandwidth.full = dfixed_mul(a, sclk);
bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
 
return dfixed_trunc(bandwidth);
}
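
/* Same shape as the dram case: a 32-byte return per sclk cycle derated by
 * 0.8; e.g. an assumed sclk of 800000 kHz gives 800 * 32 * 0.8 = 20480
 * MB/s of data return bandwidth. */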
 
static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
return 32;
}
 
static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate the DMIF Request Bandwidth */
fixed20_12 disp_clk_request_efficiency; /* 0.8 */
fixed20_12 disp_clk, sclk, bandwidth;
fixed20_12 a, b1, b2;
u32 min_bandwidth;
 
a.full = dfixed_const(1000);
disp_clk.full = dfixed_const(wm->disp_clk);
disp_clk.full = dfixed_div(disp_clk, a);
a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
b1.full = dfixed_mul(a, disp_clk);
 
a.full = dfixed_const(1000);
sclk.full = dfixed_const(wm->sclk);
sclk.full = dfixed_div(sclk, a);
a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
b2.full = dfixed_mul(a, sclk);
 
a.full = dfixed_const(10);
disp_clk_request_efficiency.full = dfixed_const(8);
disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
 
min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));
 
a.full = dfixed_const(min_bandwidth);
bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);
 
return dfixed_trunc(bandwidth);
}
 
static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate the Available bandwidth. Display can use this temporarily but not on average. */
u32 dram_bandwidth = dce6_dram_bandwidth(wm);
u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);
 
return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
 
static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
{
/* Calculate the display mode Average Bandwidth
* DisplayMode should contain the source and destination dimensions,
* timing, etc.
*/
fixed20_12 bpp;
fixed20_12 line_time;
fixed20_12 src_width;
fixed20_12 bandwidth;
fixed20_12 a;
 
a.full = dfixed_const(1000);
line_time.full = dfixed_const(wm->active_time + wm->blank_time);
line_time.full = dfixed_div(line_time, a);
bpp.full = dfixed_const(wm->bytes_per_pixel);
src_width.full = dfixed_const(wm->src_width);
bandwidth.full = dfixed_mul(src_width, bpp);
bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
bandwidth.full = dfixed_div(bandwidth, line_time);
 
return dfixed_trunc(bandwidth);
}
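
/* Worked example (mode assumed): 1920 source pixels at 4 bytes/pixel with
 * vsc = 1 over a ~14.8 us line time comes to 1920 * 4 / 14.8 ~= 519 MB/s
 * of average fetch bandwidth for that head. */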
 
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
/* First calculate the latency in ns */
u32 mc_latency = 2000; /* 2000 ns. */
u32 available_bandwidth = dce6_available_bandwidth(wm);
u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
(wm->num_heads * cursor_line_pair_return_time);
u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
u32 tmp, dmif_size = 12288;
fixed20_12 a, b, c;
 
if (wm->num_heads == 0)
return 0;
 
a.full = dfixed_const(2);
b.full = dfixed_const(1);
if ((wm->vsc.full > a.full) ||
((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
(wm->vtaps >= 5) ||
((wm->vsc.full >= a.full) && wm->interlaced))
max_src_lines_per_dst_line = 4;
else
max_src_lines_per_dst_line = 2;
 
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
 
b.full = dfixed_const(mc_latency + 512);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(b, c);
 
c.full = dfixed_const(dmif_size);
b.full = dfixed_div(c, b);
 
tmp = min(dfixed_trunc(a), dfixed_trunc(b));
 
b.full = dfixed_const(1000);
c.full = dfixed_const(wm->disp_clk);
b.full = dfixed_div(c, b);
c.full = dfixed_const(wm->bytes_per_pixel);
b.full = dfixed_mul(b, c);
 
lb_fill_bw = min(tmp, dfixed_trunc(b));
 
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
c.full = dfixed_const(lb_fill_bw);
b.full = dfixed_div(c, b);
a.full = dfixed_div(a, b);
line_fill_time = dfixed_trunc(a);
 
if (line_fill_time < wm->active_time)
return latency;
else
return latency + (line_fill_time - wm->active_time);
 
}
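
/* The watermark is the total latency the line buffer must hide: the fixed
 * 2000 ns mc latency, the time other heads spend draining their 512 * 8
 * byte chunks and cursor line pairs, and the dc pipe latency; e.g. with an
 * assumed 4480 MB/s available, one chunk returns in 4096 * 1000 / 4480
 * ~= 914 ns. If the line buffer fills slower than the active scanout time,
 * the shortfall is added on top. */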
 
static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
if (dce6_average_bandwidth(wm) <=
(dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
return true;
else
return false;
}
 
static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
if (dce6_average_bandwidth(wm) <=
(dce6_available_bandwidth(wm) / wm->num_heads))
return true;
else
return false;
}
 
static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
{
u32 lb_partitions = wm->lb_size / wm->src_width;
u32 line_time = wm->active_time + wm->blank_time;
u32 latency_tolerant_lines;
u32 latency_hiding;
fixed20_12 a;
 
a.full = dfixed_const(1);
if (wm->vsc.full > a.full)
latency_tolerant_lines = 1;
else {
if (lb_partitions <= (wm->vtaps + 1))
latency_tolerant_lines = 1;
else
latency_tolerant_lines = 2;
}
 
latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
 
if (dce6_latency_watermark(wm) <= latency_hiding)
return true;
else
return false;
}
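
/* Example (numbers assumed): with 2 latency tolerant lines of a 14.8 us
 * line plus ~1.9 us of blanking, latency_hiding is ~31.5 us, comfortably
 * above a typical watermark of a few microseconds. */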
 
static void dce6_program_watermarks(struct radeon_device *rdev,
struct radeon_crtc *radeon_crtc,
u32 lb_size, u32 num_heads)
{
struct drm_display_mode *mode = &radeon_crtc->base.mode;
struct dce6_wm_params wm;
u32 pixel_period;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 priority_a_mark = 0, priority_b_mark = 0;
u32 priority_a_cnt = PRIORITY_OFF;
u32 priority_b_cnt = PRIORITY_OFF;
u32 tmp, arb_control3;
fixed20_12 a, b, c;
 
if (radeon_crtc->base.enabled && num_heads && mode) {
pixel_period = 1000000 / (u32)mode->clock;
line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
priority_a_cnt = 0;
priority_b_cnt = 0;
 
wm.yclk = rdev->pm.current_mclk * 10;
wm.sclk = rdev->pm.current_sclk * 10;
wm.disp_clk = mode->clock;
wm.src_width = mode->crtc_hdisplay;
wm.active_time = mode->crtc_hdisplay * pixel_period;
wm.blank_time = line_time - wm.active_time;
wm.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
wm.interlaced = true;
wm.vsc = radeon_crtc->vsc;
wm.vtaps = 1;
if (radeon_crtc->rmx_type != RMX_OFF)
wm.vtaps = 2;
wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
wm.lb_size = lb_size;
if (rdev->family == CHIP_ARUBA)
wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
else
wm.dram_channels = si_get_number_of_dram_channels(rdev);
wm.num_heads = num_heads;
 
/* set for high clocks */
latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
/* set for low clocks */
/* wm.yclk = low clk; wm.sclk = low clk */
latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);
 
/* possibly force display priority to high */
/* should really do this at mode validation time... */
if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
!dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
!dce6_check_latency_hiding(&wm) ||
(rdev->disp_priority == 2)) {
DRM_DEBUG_KMS("force priority to high\n");
priority_a_cnt |= PRIORITY_ALWAYS_ON;
priority_b_cnt |= PRIORITY_ALWAYS_ON;
}
 
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
b.full = dfixed_div(b, a);
c.full = dfixed_const(latency_watermark_a);
c.full = dfixed_mul(c, b);
c.full = dfixed_mul(c, radeon_crtc->hsc);
c.full = dfixed_div(c, a);
a.full = dfixed_const(16);
c.full = dfixed_div(c, a);
priority_a_mark = dfixed_trunc(c);
priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
 
a.full = dfixed_const(1000);
b.full = dfixed_const(mode->clock);
b.full = dfixed_div(b, a);
c.full = dfixed_const(latency_watermark_b);
c.full = dfixed_mul(c, b);
c.full = dfixed_mul(c, radeon_crtc->hsc);
c.full = dfixed_div(c, a);
a.full = dfixed_const(16);
c.full = dfixed_div(c, a);
priority_b_mark = dfixed_trunc(c);
priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
}
 
/* select wm A */
arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
tmp = arb_control3;
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(1);
WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
(LATENCY_LOW_WATERMARK(latency_watermark_a) |
LATENCY_HIGH_WATERMARK(line_time)));
/* select wm B */
tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
tmp &= ~LATENCY_WATERMARK_MASK(3);
tmp |= LATENCY_WATERMARK_MASK(2);
WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
(LATENCY_LOW_WATERMARK(latency_watermark_b) |
LATENCY_HIGH_WATERMARK(line_time)));
/* restore original selection */
WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);
 
/* write the priority marks */
WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
 
}
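
/* The priority mark computed above reduces to
 * latency_watermark(ns) * pixel_clock(MHz) * hsc / 1000 / 16, i.e. the
 * watermark converted to pixels and scaled into 16-pixel units; e.g. an
 * assumed 4000 ns watermark at 148.5 MHz with hsc = 1 gives
 * 4000 * 148.5 / 1000 / 16 ~= 37. */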
 
void dce6_bandwidth_update(struct radeon_device *rdev)
{
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
u32 num_heads = 0, lb_size;
int i;
 
radeon_update_display_priority(rdev);
 
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->mode_info.crtcs[i]->base.enabled)
num_heads++;
}
for (i = 0; i < rdev->num_crtc; i += 2) {
mode0 = &rdev->mode_info.crtcs[i]->base.mode;
mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
}
}
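
/* crtcs are walked in linked pairs (i, i + 1) because each pair shares one
 * line buffer; passing the partner's mode into the adjust call lets it pick
 * between the half and whole allocations for each head. */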
 
/*
* Core functions
*/
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
const u32 num_tile_mode_states = 32;
u32 reg_offset, gb_tile_moden, split_equal_to_row_size;
 
switch (rdev->config.si.mem_row_size_in_kb) {
case 1:
split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
break;
case 2:
default:
split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
break;
case 4:
split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
break;
}
 
if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN)) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0: /* non-AA compressed depth or any compressed stencil */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 1: /* 2xAA/4xAA compressed depth only */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 2: /* 8xAA compressed depth only */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(split_equal_to_row_size) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(split_equal_to_row_size) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
break;
case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(split_equal_to_row_size) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 8: /* 1D and 1D Array Surfaces */
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 9: /* Displayable maps. */
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 10: /* Display 8bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 11: /* Display 16bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 12: /* Display 32bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
break;
case 13: /* Thin. */
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 14: /* Thin 8 bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
break;
case 15: /* Thin 16 bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
break;
case 16: /* Thin 32 bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
break;
case 17: /* Thin 64 bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(split_equal_to_row_size) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
break;
case 21: /* 8 bpp PRT. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 22: /* 16 bpp PRT */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
case 23: /* 32 bpp PRT */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 24: /* 64 bpp PRT */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 25: /* 128 bpp PRT */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
NUM_BANKS(ADDR_SURF_8_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
break;
default:
gb_tile_moden = 0;
break;
}
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else if (rdev->family == CHIP_VERDE) {
for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
switch (reg_offset) {
case 0: /* non-AA compressed depth or any compressed stencil */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
case 1: /* 2xAA/4xAA compressed depth only */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
case 2: /* 8xAA compressed depth only */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(split_equal_to_row_size) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(split_equal_to_row_size) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(split_equal_to_row_size) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
case 8: /* 1D and 1D Array Surfaces */
gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 9: /* Displayable maps. */
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 10: /* Display 8bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
case 11: /* Display 16bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 12: /* Display 32bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 13: /* Thin. */
gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 14: /* Thin 8 bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 15: /* Thin 16 bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 16: /* Thin 32 bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 17: /* Thin 64 bpp. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P4_8x16) |
TILE_SPLIT(split_equal_to_row_size) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 21: /* 8 bpp PRT. */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 22: /* 16 bpp PRT */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4));
break;
case 23: /* 32 bpp PRT */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 24: /* 64 bpp PRT */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
NUM_BANKS(ADDR_SURF_16_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2));
break;
case 25: /* 128 bpp PRT */
gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
NUM_BANKS(ADDR_SURF_8_BANK) |
BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1));
break;
default:
gb_tile_moden = 0;
break;
}
WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
}
} else
DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
}
 
static void si_select_se_sh(struct radeon_device *rdev,
u32 se_num, u32 sh_num)
{
u32 data = INSTANCE_BROADCAST_WRITES;
 
if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
else if (se_num == 0xffffffff)
data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
else if (sh_num == 0xffffffff)
data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
else
data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
WREG32(GRBM_GFX_INDEX, data);
}
 
static u32 si_create_bitmask(u32 bit_width)
{
u32 i, mask = 0;
 
for (i = 0; i < bit_width; i++) {
mask <<= 1;
mask |= 1;
}
return mask;
}
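
/* Illustrative sketch, not from the original driver: for bit_width < 32 the
* loop above reduces to the closed form below. The helper name is made up and
* is shown only to make the loop's intent explicit.
*/
static inline u32 si_bitmask_closed_form(u32 bit_width)
{
    return (1u << bit_width) - 1; /* e.g. bit_width = 5 -> 0x1f */
}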
 
static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
{
u32 data, mask;
 
data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
if (data & 1)
data &= INACTIVE_CUS_MASK;
else
data = 0;
data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
 
data >>= INACTIVE_CUS_SHIFT;
 
mask = si_create_bitmask(cu_per_sh);
 
return ~data & mask;
}
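
/* Worked example (illustrative): with cu_per_sh = 5 and the combined
* INACTIVE_CUS field reading 0b00110 (CUs 1 and 2 disabled), the function
* returns ~0b00110 & 0x1f = 0b11001, i.e. CUs 0, 3 and 4 are usable in
* this shader array.
*/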
 
static void si_setup_spi(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
u32 cu_per_sh)
{
int i, j, k;
u32 data, mask, active_cu;
 
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
si_select_se_sh(rdev, i, j);
data = RREG32(SPI_STATIC_THREAD_MGMT_3);
active_cu = si_get_cu_enabled(rdev, cu_per_sh);
 
mask = 1;
for (k = 0; k < 16; k++) {
mask <<= k;
if (active_cu & mask) {
data &= ~mask;
WREG32(SPI_STATIC_THREAD_MGMT_3, data);
break;
}
}
}
}
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
 
static u32 si_get_rb_disabled(struct radeon_device *rdev,
u32 max_rb_num, u32 se_num,
u32 sh_per_se)
{
u32 data, mask;
 
data = RREG32(CC_RB_BACKEND_DISABLE);
if (data & 1)
data &= BACKEND_DISABLE_MASK;
else
data = 0;
data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
 
data >>= BACKEND_DISABLE_SHIFT;
 
mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
 
return data & mask;
}
 
static void si_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
u32 max_rb_num)
{
int i, j;
u32 data, mask;
u32 disabled_rbs = 0;
u32 enabled_rbs = 0;
 
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
si_select_se_sh(rdev, i, j);
data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
}
}
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
 
mask = 1;
for (i = 0; i < max_rb_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
 
for (i = 0; i < se_num; i++) {
si_select_se_sh(rdev, i, 0xffffffff);
data = 0;
for (j = 0; j < sh_per_se; j++) {
switch (enabled_rbs & 3) {
case 1:
data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
break;
case 2:
data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
break;
case 3:
default:
data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
break;
}
enabled_rbs >>= 2;
}
WREG32(PA_SC_RASTER_CONFIG, data);
}
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
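
/* Note (derived from the code above): each shader array contributes
* TAHITI_RB_BITMAP_WIDTH_PER_SH bits (two, matching the 'enabled_rbs & 3'
* decode) to the RB bitmap, and the switch maps each 2-bit field to a
* raster config: 0b01 -> RASTER_CONFIG_RB_MAP_0, 0b10 -> RB_MAP_3, and
* 0b11 (both RBs usable) -> RB_MAP_2, shifted into the slot for SE i/SH j.
*/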
 
static void si_gpu_init(struct radeon_device *rdev)
{
u32 gb_addr_config = 0;
u32 mc_shared_chmap, mc_arb_ramcfg;
u32 sx_debug_1;
u32 hdp_host_path_cntl;
u32 tmp;
int i, j;
 
switch (rdev->family) {
case CHIP_TAHITI:
rdev->config.si.max_shader_engines = 2;
rdev->config.si.max_tile_pipes = 12;
rdev->config.si.max_cu_per_sh = 8;
rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 12;
rdev->config.si.max_gprs = 256;
rdev->config.si.max_gs_threads = 32;
rdev->config.si.max_hw_contexts = 8;
 
rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
rdev->config.si.sc_prim_fifo_size_backend = 0x100;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_PITCAIRN:
rdev->config.si.max_shader_engines = 2;
rdev->config.si.max_tile_pipes = 8;
rdev->config.si.max_cu_per_sh = 5;
rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 8;
rdev->config.si.max_gprs = 256;
rdev->config.si.max_gs_threads = 32;
rdev->config.si.max_hw_contexts = 8;
 
rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
rdev->config.si.sc_prim_fifo_size_backend = 0x100;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_VERDE:
default:
rdev->config.si.max_shader_engines = 1;
rdev->config.si.max_tile_pipes = 4;
rdev->config.si.max_cu_per_sh = 2;
rdev->config.si.max_sh_per_se = 2;
rdev->config.si.max_backends_per_se = 4;
rdev->config.si.max_texture_channel_caches = 4;
rdev->config.si.max_gprs = 256;
rdev->config.si.max_gs_threads = 32;
rdev->config.si.max_hw_contexts = 8;
 
rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
rdev->config.si.sc_prim_fifo_size_backend = 0x40;
rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
break;
}
 
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
WREG32((0x2c14 + j), 0x00000000);
WREG32((0x2c18 + j), 0x00000000);
WREG32((0x2c1c + j), 0x00000000);
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
}
 
WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));
 
evergreen_fix_pci_max_read_req_size(rdev);
 
WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN);
 
mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
 
rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
rdev->config.si.mem_max_burst_length_bytes = 256;
tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
if (rdev->config.si.mem_row_size_in_kb > 4)
rdev->config.si.mem_row_size_in_kb = 4;
/* XXX use MC settings? */
rdev->config.si.shader_engine_tile_size = 32;
rdev->config.si.num_gpus = 1;
rdev->config.si.multi_gpu_tile_size = 64;
 
/* fix up row size */
gb_addr_config &= ~ROW_SIZE_MASK;
switch (rdev->config.si.mem_row_size_in_kb) {
case 1:
default:
gb_addr_config |= ROW_SIZE(0);
break;
case 2:
gb_addr_config |= ROW_SIZE(1);
break;
case 4:
gb_addr_config |= ROW_SIZE(2);
break;
}
 
/* set up the tiling info dword. gb_addr_config is not adequate since it does
* not have bank info, so create a custom tiling dword.
* bits 3:0 num_pipes
* bits 7:4 num_banks
* bits 11:8 group_size
* bits 15:12 row_size
*/
rdev->config.si.tile_config = 0;
switch (rdev->config.si.num_tile_pipes) {
case 1:
rdev->config.si.tile_config |= (0 << 0);
break;
case 2:
rdev->config.si.tile_config |= (1 << 0);
break;
case 4:
rdev->config.si.tile_config |= (2 << 0);
break;
case 8:
default:
/* XXX what about 12? */
rdev->config.si.tile_config |= (3 << 0);
break;
}
switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) {
case 0: /* four banks */
rdev->config.si.tile_config |= 0 << 4;
break;
case 1: /* eight banks */
rdev->config.si.tile_config |= 1 << 4;
break;
case 2: /* sixteen banks */
default:
rdev->config.si.tile_config |= 2 << 4;
break;
}
rdev->config.si.tile_config |=
((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
rdev->config.si.tile_config |=
((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
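
/* Worked example (illustrative): the dword built above decodes as
* num_pipes = 1 << (tile_config & 0xf),
* num_banks = 4 << ((tile_config >> 4) & 0xf) and
* row size = 1 KiB << ((tile_config >> 12) & 0xf),
* mirroring the encodings chosen in the switches above.
*/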
 
WREG32(GB_ADDR_CONFIG, gb_addr_config);
WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
WREG32(HDP_ADDR_CONFIG, gb_addr_config);
 
si_tiling_mode_table_init(rdev);
 
si_setup_rb(rdev, rdev->config.si.max_shader_engines,
rdev->config.si.max_sh_per_se,
rdev->config.si.max_backends_per_se);
 
si_setup_spi(rdev, rdev->config.si.max_shader_engines,
rdev->config.si.max_sh_per_se,
rdev->config.si.max_cu_per_sh);
 
 
/* set HW defaults for 3D engine */
WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
ROQ_IB2_START(0x2b)));
WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
 
sx_debug_1 = RREG32(SX_DEBUG_1);
WREG32(SX_DEBUG_1, sx_debug_1);
 
WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
 
WREG32(PA_SC_FIFO_SIZE, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_frontend) |
SC_BACKEND_PRIM_FIFO_SIZE(rdev->config.si.sc_prim_fifo_size_backend) |
SC_HIZ_TILE_FIFO_SIZE(rdev->config.si.sc_hiz_tile_fifo_size) |
SC_EARLYZ_TILE_FIFO_SIZE(rdev->config.si.sc_earlyz_tile_fifo_size)));
 
WREG32(VGT_NUM_INSTANCES, 1);
 
WREG32(CP_PERFMON_CNTL, 0);
 
WREG32(SQ_CONFIG, 0);
 
WREG32(PA_SC_FORCE_EOV_MAX_CNTS, (FORCE_EOV_MAX_CLK_CNT(4095) |
FORCE_EOV_MAX_REZ_CNT(255)));
 
WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC) |
AUTO_INVLD_EN(ES_AND_GS_AUTO));
 
WREG32(VGT_GS_VERTEX_REUSE, 16);
WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
 
WREG32(CB_PERFCOUNTER0_SELECT0, 0);
WREG32(CB_PERFCOUNTER0_SELECT1, 0);
WREG32(CB_PERFCOUNTER1_SELECT0, 0);
WREG32(CB_PERFCOUNTER1_SELECT1, 0);
WREG32(CB_PERFCOUNTER2_SELECT0, 0);
WREG32(CB_PERFCOUNTER2_SELECT1, 0);
WREG32(CB_PERFCOUNTER3_SELECT0, 0);
WREG32(CB_PERFCOUNTER3_SELECT1, 0);
 
tmp = RREG32(HDP_MISC_CNTL);
tmp |= HDP_FLUSH_INVALIDATE_CACHE;
WREG32(HDP_MISC_CNTL, tmp);
 
hdp_host_path_cntl = RREG32(HDP_HOST_PATH_CNTL);
WREG32(HDP_HOST_PATH_CNTL, hdp_host_path_cntl);
 
WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
 
udelay(50);
}
 
/*
* GPU scratch registers helper functions.
*/
static void si_scratch_init(struct radeon_device *rdev)
{
int i;
 
rdev->scratch.num_reg = 7;
rdev->scratch.reg_base = SCRATCH_REG0;
for (i = 0; i < rdev->scratch.num_reg; i++) {
rdev->scratch.free[i] = true;
rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
}
}
 
void si_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
struct radeon_ring *ring = &rdev->ring[fence->ring];
u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
 
/* flush read cache over gart */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
PACKET3_SH_KCACHE_ACTION_ENA |
PACKET3_SH_ICACHE_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
radeon_ring_write(ring, addr & 0xffffffff);
radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
radeon_ring_write(ring, fence->seq);
radeon_ring_write(ring, 0);
}
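
/* Note on the EOP packet above (based on the PM4 packet definitions):
* DATA_SEL(1) makes the CP write the 32-bit fence->seq to 'addr' once all
* prior work on the ring has drained, and INT_SEL(2) raises an interrupt
* after that write, which is what wakes fence waiters.
*/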
 
/*
* IB stuff
*/
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
struct radeon_ring *ring = &rdev->ring[ib->ring];
u32 header;
 
if (ib->is_const_ib) {
/* set switch buffer packet before const IB */
radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
radeon_ring_write(ring, 0);
 
header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
} else {
u32 next_rptr;
if (ring->rptr_save_reg) {
next_rptr = ring->wptr + 3 + 4 + 8;
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, ((ring->rptr_save_reg -
PACKET3_SET_CONFIG_REG_START) >> 2));
radeon_ring_write(ring, next_rptr);
} else if (rdev->wb.enabled) {
next_rptr = ring->wptr + 5 + 4 + 8;
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (1 << 8));
radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
radeon_ring_write(ring, next_rptr);
}
 
header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
}
 
radeon_ring_write(ring, header);
radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
radeon_ring_write(ring, ib->length_dw |
(ib->vm ? (ib->vm->id << 24) : 0));
 
if (!ib->is_const_ib) {
/* flush read cache over gart for this vmid */
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
radeon_ring_write(ring, ib->vm ? ib->vm->id : 0);
radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
PACKET3_TC_ACTION_ENA |
PACKET3_SH_KCACHE_ACTION_ENA |
PACKET3_SH_ICACHE_ACTION_ENA);
radeon_ring_write(ring, 0xFFFFFFFF);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 10); /* poll interval */
}
}
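
/* Note (derived from the emits above): the next_rptr offsets count the
* dwords still to be written before the IB starts executing: 3 for the
* SET_CONFIG_REG write (or 5 for the WRITE_DATA variant), 4 for the
* INDIRECT_BUFFER packet itself, and 8 for the trailing cache flush
* (a 3-dword SET_CONFIG_REG plus a 5-dword SURFACE_SYNC).
*/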
 
/*
* CP.
*/
static void si_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
WREG32(CP_ME_CNTL, 0);
else {
// radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
WREG32(SCRATCH_UMSK, 0);
}
udelay(50);
}
 
static int si_cp_load_microcode(struct radeon_device *rdev)
{
const __be32 *fw_data;
int i;
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw)
return -EINVAL;
 
si_cp_enable(rdev, false);
 
/* PFP */
fw_data = (const __be32 *)rdev->pfp_fw->data;
WREG32(CP_PFP_UCODE_ADDR, 0);
for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_PFP_UCODE_ADDR, 0);
 
/* CE */
fw_data = (const __be32 *)rdev->ce_fw->data;
WREG32(CP_CE_UCODE_ADDR, 0);
for (i = 0; i < SI_CE_UCODE_SIZE; i++)
WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
WREG32(CP_CE_UCODE_ADDR, 0);
 
/* ME */
fw_data = (const __be32 *)rdev->me_fw->data;
WREG32(CP_ME_RAM_WADDR, 0);
for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
WREG32(CP_ME_RAM_WADDR, 0);
 
WREG32(CP_PFP_UCODE_ADDR, 0);
WREG32(CP_CE_UCODE_ADDR, 0);
WREG32(CP_ME_RAM_WADDR, 0);
WREG32(CP_ME_RAM_RADDR, 0);
return 0;
}
 
static int si_cp_start(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
 
r = radeon_ring_lock(rdev, ring, 7 + 4);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* init the CP */
radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
radeon_ring_write(ring, 0x1);
radeon_ring_write(ring, 0x0);
radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0);
 
/* init the CE partitions */
radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
radeon_ring_write(ring, 0xc000);
radeon_ring_write(ring, 0xe000);
radeon_ring_unlock_commit(rdev, ring);
 
si_cp_enable(rdev, true);
 
r = radeon_ring_lock(rdev, ring, si_default_size + 10);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
 
/* setup clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
 
for (i = 0; i < si_default_size; i++)
radeon_ring_write(ring, si_default_state[i]);
 
radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
 
/* set clear context state */
radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
 
radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
radeon_ring_write(ring, 0x00000316);
radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */
 
radeon_ring_unlock_commit(rdev, ring);
 
for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
ring = &rdev->ring[i];
r = radeon_ring_lock(rdev, ring, 2);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
 
/* clear the compute context state */
radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
radeon_ring_write(ring, 0);
 
radeon_ring_unlock_commit(rdev, ring);
}
 
return 0;
}
 
static void si_cp_fini(struct radeon_device *rdev)
{
struct radeon_ring *ring;
si_cp_enable(rdev, false);
 
// ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
// radeon_ring_fini(rdev, ring);
// radeon_scratch_free(rdev, ring->rptr_save_reg);
 
// ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
// radeon_ring_fini(rdev, ring);
// radeon_scratch_free(rdev, ring->rptr_save_reg);
 
// ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
// radeon_ring_fini(rdev, ring);
// radeon_scratch_free(rdev, ring->rptr_save_reg);
}
 
static int si_cp_resume(struct radeon_device *rdev)
{
struct radeon_ring *ring;
u32 tmp;
u32 rb_bufsz;
int r;
 
/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
SOFT_RESET_PA |
SOFT_RESET_VGT |
SOFT_RESET_SPI |
SOFT_RESET_SX));
RREG32(GRBM_SOFT_RESET);
mdelay(15);
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
 
WREG32(CP_SEM_WAIT_TIMER, 0x0);
WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
 
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
 
WREG32(CP_DEBUG, 0);
WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
 
/* ring 0 - compute and gfx */
/* Set ring buffer size */
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB0_CNTL, tmp);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
ring->wptr = 0;
WREG32(CP_RB0_WPTR, ring->wptr);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
 
if (rdev->wb.enabled)
WREG32(SCRATCH_UMSK, 0xff);
else {
tmp |= RB_NO_UPDATE;
WREG32(SCRATCH_UMSK, 0);
}
 
mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
 
WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
 
ring->rptr = RREG32(CP_RB0_RPTR);
 
/* ring1 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB1_CNTL, tmp);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
ring->wptr = 0;
WREG32(CP_RB1_WPTR, ring->wptr);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);
 
mdelay(1);
WREG32(CP_RB1_CNTL, tmp);
 
WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
 
ring->rptr = RREG32(CP_RB1_RPTR);
 
/* ring2 - compute only */
/* Set ring buffer size */
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB2_CNTL, tmp);
 
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
ring->wptr = 0;
WREG32(CP_RB2_WPTR, ring->wptr);
 
/* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);
 
mdelay(1);
WREG32(CP_RB2_CNTL, tmp);
 
WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
 
ring->rptr = RREG32(CP_RB2_RPTR);
 
/* start the rings */
si_cp_start(rdev);
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
return r;
}
r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
if (r) {
rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
}
r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
if (r) {
rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
}
 
return 0;
}
 
bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status, grbm_status2;
u32 grbm_status_se0, grbm_status_se1;
 
srbm_status = RREG32(SRBM_STATUS);
grbm_status = RREG32(GRBM_STATUS);
grbm_status2 = RREG32(GRBM_STATUS2);
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
radeon_ring_lockup_update(ring);
return false;
}
/* force CP activities */
radeon_ring_force_activity(rdev, ring);
return radeon_ring_test_lockup(rdev, ring);
}
 
static int si_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 grbm_reset = 0;
 
if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
return 0;
 
dev_info(rdev->dev, "GPU softreset \n");
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
RREG32(GRBM_STATUS2));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
RREG32(GRBM_STATUS_SE0));
dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
evergreen_mc_stop(rdev, &save);
if (radeon_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Disable CP parsing/prefetching */
WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);
 
/* reset all the gfx blocks */
grbm_reset = (SOFT_RESET_CP |
SOFT_RESET_CB |
SOFT_RESET_DB |
SOFT_RESET_GDS |
SOFT_RESET_PA |
SOFT_RESET_SC |
SOFT_RESET_BCI |
SOFT_RESET_SPI |
SOFT_RESET_SX |
SOFT_RESET_TC |
SOFT_RESET_TA |
SOFT_RESET_VGT |
SOFT_RESET_IA);
 
dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
WREG32(GRBM_SOFT_RESET, grbm_reset);
(void)RREG32(GRBM_SOFT_RESET);
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
RREG32(GRBM_STATUS2));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
RREG32(GRBM_STATUS_SE0));
dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
evergreen_mc_resume(rdev, &save);
return 0;
}
 
int si_asic_reset(struct radeon_device *rdev)
{
return si_gpu_soft_reset(rdev);
}
 
/* MC */
static void si_mc_program(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 tmp;
int i, j;
 
/* Initialize HDP */
for (i = 0, j = 0; i < 32; i++, j += 0x18) {
WREG32((0x2c14 + j), 0x00000000);
WREG32((0x2c18 + j), 0x00000000);
WREG32((0x2c1c + j), 0x00000000);
WREG32((0x2c20 + j), 0x00000000);
WREG32((0x2c24 + j), 0x00000000);
}
WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
 
evergreen_mc_stop(rdev, &save);
if (radeon_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
/* Lockout access through VGA aperture*/
WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
/* Update configuration */
WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
rdev->mc.vram_start >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
rdev->mc.vram_end >> 12);
WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
rdev->vram_scratch.gpu_addr >> 12);
tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
WREG32(MC_VM_FB_LOCATION, tmp);
/* XXX double check these! */
WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
WREG32(MC_VM_AGP_BASE, 0);
WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
if (radeon_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
}
evergreen_mc_resume(rdev, &save);
/* we need to own VRAM, so turn off the VGA renderer here
* to stop it overwriting our objects */
rv515_vga_render_disable(rdev);
}
 
/* SI MC address space is 40 bits */
static void si_vram_location(struct radeon_device *rdev,
struct radeon_mc *mc, u64 base)
{
mc->vram_start = base;
if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
mc->real_vram_size = mc->aper_size;
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
 
static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
u64 size_af, size_bf;
 
size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
size_bf = mc->vram_start & ~mc->gtt_base_align;
if (size_bf > size_af) {
if (mc->gtt_size > size_bf) {
dev_warn(rdev->dev, "limiting GTT\n");
mc->gtt_size = size_bf;
}
mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
} else {
if (mc->gtt_size > size_af) {
dev_warn(rdev->dev, "limiting GTT\n");
mc->gtt_size = size_af;
}
mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
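
/* Worked example (illustrative): with 2 GiB of VRAM at base 0, vram_end is
* 0x7FFFFFFF, so size_bf (room below VRAM) is 0 while size_af (room above,
* up to the 40-bit limit) is roughly 1022 GiB; the GTT is then placed
* immediately above VRAM at the first gtt_base_align-aligned address.
*/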
 
static void si_vram_gtt_location(struct radeon_device *rdev,
struct radeon_mc *mc)
{
if (mc->mc_vram_size > 0xFFC0000000ULL) {
/* leave room for at least 1024M GTT */
dev_warn(rdev->dev, "limiting VRAM\n");
mc->real_vram_size = 0xFFC0000000ULL;
mc->mc_vram_size = 0xFFC0000000ULL;
}
si_vram_location(rdev, &rdev->mc, 0);
rdev->mc.gtt_base_align = 0;
si_gtt_location(rdev, mc);
}
 
static int si_mc_init(struct radeon_device *rdev)
{
u32 tmp;
int chansize, numchan;
 
/* Get VRAM information */
rdev->mc.vram_is_ddr = true;
tmp = RREG32(MC_ARB_RAMCFG);
if (tmp & CHANSIZE_OVERRIDE) {
chansize = 16;
} else if (tmp & CHANSIZE_MASK) {
chansize = 64;
} else {
chansize = 32;
}
tmp = RREG32(MC_SHARED_CHMAP);
switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
case 0:
default:
numchan = 1;
break;
case 1:
numchan = 2;
break;
case 2:
numchan = 4;
break;
case 3:
numchan = 8;
break;
case 4:
numchan = 3;
break;
case 5:
numchan = 6;
break;
case 6:
numchan = 10;
break;
case 7:
numchan = 12;
break;
case 8:
numchan = 16;
break;
}
rdev->mc.vram_width = numchan * chansize;
/* Could aper size report 0? */
rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
/* size in MB on si */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
rdev->mc.visible_vram_size = rdev->mc.aper_size;
si_vram_gtt_location(rdev, &rdev->mc);
radeon_update_bandwidth_info(rdev);
 
return 0;
}
 
/*
* GART
*/
void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
/* flush hdp cache */
WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
 
/* bits 0-15 are the VM contexts0-15 */
WREG32(VM_INVALIDATE_REQUEST, 1);
}
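
/* Note: per the bit layout above, writing 1 invalidates only VM context 0,
* the context that backs the GART; invalidating every context would take a
* write of 0xffff instead.
*/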
 
static int si_pcie_gart_enable(struct radeon_device *rdev)
{
int r, i;
 
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
return -EINVAL;
}
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL,
(0xA << 7) |
ENABLE_L1_TLB |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
L2_CACHE_BIGK_FRAGMENT_SIZE(0));
/* setup context0 */
WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT0_CNTL2, 0);
WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));
 
WREG32(0x15D4, 0);
WREG32(0x15D8, 0);
WREG32(0x15DC, 0);
 
/* empty context1-15 */
/* set vm size, must be a multiple of 4 */
WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
/* Assign the pt base to something valid for now; the pts used for
* the VMs are determined by the application, set up and assigned
* on the fly in the VM part of radeon_gart.c
*/
for (i = 1; i < 16; i++) {
if (i < 8)
WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
rdev->gart.table_addr >> 12);
else
WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
rdev->gart.table_addr >> 12);
}
 
/* enable context1-15 */
WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
(u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 0);
WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(1) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
 
si_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
(unsigned long long)rdev->gart.table_addr);
rdev->gart.ready = true;
return 0;
}
 
static void si_pcie_gart_disable(struct radeon_device *rdev)
{
/* Disable all tables */
WREG32(VM_CONTEXT0_CNTL, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
/* Setup TLB control */
WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
EFFECTIVE_L2_QUEUE_SIZE(7) |
CONTEXT1_IDENTITY_ACCESS_MODE(1));
WREG32(VM_L2_CNTL2, 0);
WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
L2_CACHE_BIGK_FRAGMENT_SIZE(0));
radeon_gart_table_vram_unpin(rdev);
}
 
static void si_pcie_gart_fini(struct radeon_device *rdev)
{
si_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
// radeon_gart_fini(rdev);
}
 
/* vm parser */
static bool si_vm_reg_valid(u32 reg)
{
/* context regs are fine */
if (reg >= 0x28000)
return true;
 
/* check config regs */
switch (reg) {
case GRBM_GFX_INDEX:
case VGT_VTX_VECT_EJECT_REG:
case VGT_CACHE_INVALIDATION:
case VGT_ESGS_RING_SIZE:
case VGT_GSVS_RING_SIZE:
case VGT_GS_VERTEX_REUSE:
case VGT_PRIMITIVE_TYPE:
case VGT_INDEX_TYPE:
case VGT_NUM_INDICES:
case VGT_NUM_INSTANCES:
case VGT_TF_RING_SIZE:
case VGT_HS_OFFCHIP_PARAM:
case VGT_TF_MEMORY_BASE:
case PA_CL_ENHANCE:
case PA_SU_LINE_STIPPLE_VALUE:
case PA_SC_LINE_STIPPLE_STATE:
case PA_SC_ENHANCE:
case SQC_CACHES:
case SPI_STATIC_THREAD_MGMT_1:
case SPI_STATIC_THREAD_MGMT_2:
case SPI_STATIC_THREAD_MGMT_3:
case SPI_PS_MAX_WAVE_ID:
case SPI_CONFIG_CNTL:
case SPI_CONFIG_CNTL_1:
case TA_CNTL_AUX:
return true;
default:
DRM_ERROR("Invalid register 0x%x in CS\n", reg);
return false;
}
}
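
/* Hedged usage sketch (the helper name is hypothetical): the packet
* checkers below carry register offsets as dword indices and multiply by 4
* before validation, e.g.:
*/
static inline bool si_vm_reg_dword_valid(u32 dword_offset)
{
    return si_vm_reg_valid(dword_offset * 4);
}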
 
static int si_vm_packet3_ce_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
switch (pkt->opcode) {
case PACKET3_NOP:
case PACKET3_SET_BASE:
case PACKET3_SET_CE_DE_COUNTERS:
case PACKET3_LOAD_CONST_RAM:
case PACKET3_WRITE_CONST_RAM:
case PACKET3_WRITE_CONST_RAM_OFFSET:
case PACKET3_DUMP_CONST_RAM:
case PACKET3_INCREMENT_CE_COUNTER:
case PACKET3_WAIT_ON_DE_COUNTER:
case PACKET3_CE_WRITE:
break;
default:
DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
}
return 0;
}
 
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, end_reg, reg, i;
 
switch (pkt->opcode) {
case PACKET3_NOP:
case PACKET3_SET_BASE:
case PACKET3_CLEAR_STATE:
case PACKET3_INDEX_BUFFER_SIZE:
case PACKET3_DISPATCH_DIRECT:
case PACKET3_DISPATCH_INDIRECT:
case PACKET3_ALLOC_GDS:
case PACKET3_WRITE_GDS_RAM:
case PACKET3_ATOMIC_GDS:
case PACKET3_ATOMIC:
case PACKET3_OCCLUSION_QUERY:
case PACKET3_SET_PREDICATION:
case PACKET3_COND_EXEC:
case PACKET3_PRED_EXEC:
case PACKET3_DRAW_INDIRECT:
case PACKET3_DRAW_INDEX_INDIRECT:
case PACKET3_INDEX_BASE:
case PACKET3_DRAW_INDEX_2:
case PACKET3_CONTEXT_CONTROL:
case PACKET3_INDEX_TYPE:
case PACKET3_DRAW_INDIRECT_MULTI:
case PACKET3_DRAW_INDEX_AUTO:
case PACKET3_DRAW_INDEX_IMMD:
case PACKET3_NUM_INSTANCES:
case PACKET3_DRAW_INDEX_MULTI_AUTO:
case PACKET3_STRMOUT_BUFFER_UPDATE:
case PACKET3_DRAW_INDEX_OFFSET_2:
case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
case PACKET3_MPEG_INDEX:
case PACKET3_WAIT_REG_MEM:
case PACKET3_MEM_WRITE:
case PACKET3_PFP_SYNC_ME:
case PACKET3_SURFACE_SYNC:
case PACKET3_EVENT_WRITE:
case PACKET3_EVENT_WRITE_EOP:
case PACKET3_EVENT_WRITE_EOS:
case PACKET3_SET_CONTEXT_REG:
case PACKET3_SET_CONTEXT_REG_INDIRECT:
case PACKET3_SET_SH_REG:
case PACKET3_SET_SH_REG_OFFSET:
case PACKET3_INCREMENT_DE_COUNTER:
case PACKET3_WAIT_ON_CE_COUNTER:
case PACKET3_WAIT_ON_AVAIL_BUFFER:
case PACKET3_ME_WRITE:
break;
case PACKET3_COPY_DATA:
if ((idx_value & 0xf00) == 0) {
reg = ib[idx + 3] * 4;
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
break;
case PACKET3_WRITE_DATA:
if ((idx_value & 0xf00) == 0) {
start_reg = ib[idx + 1] * 4;
if (idx_value & 0x10000) {
if (!si_vm_reg_valid(start_reg))
return -EINVAL;
} else {
for (i = 0; i < (pkt->count - 2); i++) {
reg = start_reg + (4 * i);
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
}
}
break;
case PACKET3_COND_WRITE:
if (idx_value & 0x100) {
reg = ib[idx + 5] * 4;
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
break;
case PACKET3_COPY_DW:
if (idx_value & 0x2) {
reg = ib[idx + 3] * 4;
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
break;
case PACKET3_SET_CONFIG_REG:
start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
end_reg = 4 * pkt->count + start_reg - 4;
if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
(start_reg >= PACKET3_SET_CONFIG_REG_END) ||
(end_reg >= PACKET3_SET_CONFIG_REG_END)) {
DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
return -EINVAL;
}
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
break;
default:
DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
}
return 0;
}
 
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
u32 *ib, struct radeon_cs_packet *pkt)
{
u32 idx = pkt->idx + 1;
u32 idx_value = ib[idx];
u32 start_reg, reg, i;
 
switch (pkt->opcode) {
case PACKET3_NOP:
case PACKET3_SET_BASE:
case PACKET3_CLEAR_STATE:
case PACKET3_DISPATCH_DIRECT:
case PACKET3_DISPATCH_INDIRECT:
case PACKET3_ALLOC_GDS:
case PACKET3_WRITE_GDS_RAM:
case PACKET3_ATOMIC_GDS:
case PACKET3_ATOMIC:
case PACKET3_OCCLUSION_QUERY:
case PACKET3_SET_PREDICATION:
case PACKET3_COND_EXEC:
case PACKET3_PRED_EXEC:
case PACKET3_CONTEXT_CONTROL:
case PACKET3_STRMOUT_BUFFER_UPDATE:
case PACKET3_WAIT_REG_MEM:
case PACKET3_MEM_WRITE:
case PACKET3_PFP_SYNC_ME:
case PACKET3_SURFACE_SYNC:
case PACKET3_EVENT_WRITE:
case PACKET3_EVENT_WRITE_EOP:
case PACKET3_EVENT_WRITE_EOS:
case PACKET3_SET_CONTEXT_REG:
case PACKET3_SET_CONTEXT_REG_INDIRECT:
case PACKET3_SET_SH_REG:
case PACKET3_SET_SH_REG_OFFSET:
case PACKET3_INCREMENT_DE_COUNTER:
case PACKET3_WAIT_ON_CE_COUNTER:
case PACKET3_WAIT_ON_AVAIL_BUFFER:
case PACKET3_ME_WRITE:
break;
case PACKET3_COPY_DATA:
if ((idx_value & 0xf00) == 0) {
reg = ib[idx + 3] * 4;
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
break;
case PACKET3_WRITE_DATA:
if ((idx_value & 0xf00) == 0) {
start_reg = ib[idx + 1] * 4;
if (idx_value & 0x10000) {
if (!si_vm_reg_valid(start_reg))
return -EINVAL;
} else {
for (i = 0; i < (pkt->count - 2); i++) {
reg = start_reg + (4 * i);
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
}
}
break;
case PACKET3_COND_WRITE:
if (idx_value & 0x100) {
reg = ib[idx + 5] * 4;
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
break;
case PACKET3_COPY_DW:
if (idx_value & 0x2) {
reg = ib[idx + 3] * 4;
if (!si_vm_reg_valid(reg))
return -EINVAL;
}
break;
default:
DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
return -EINVAL;
}
return 0;
}
 
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
int ret = 0;
u32 idx = 0;
struct radeon_cs_packet pkt;
 
do {
pkt.idx = idx;
pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
pkt.one_reg_wr = 0;
switch (pkt.type) {
case PACKET_TYPE0:
dev_err(rdev->dev, "Packet0 not allowed!\n");
ret = -EINVAL;
break;
case PACKET_TYPE2:
idx += 1;
break;
case PACKET_TYPE3:
pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
if (ib->is_const_ib)
ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
else {
switch (ib->ring) {
case RADEON_RING_TYPE_GFX_INDEX:
ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
break;
case CAYMAN_RING_TYPE_CP1_INDEX:
case CAYMAN_RING_TYPE_CP2_INDEX:
ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
break;
default:
dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
ret = -EINVAL;
break;
}
}
idx += pkt.count + 2;
break;
default:
dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
ret = -EINVAL;
break;
}
if (ret)
break;
} while (idx < ib->length_dw);
 
return ret;
}
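
/* Note on the parser arithmetic above (standard PM4 framing): a type-3
* packet occupies count + 2 dwords -- one header dword plus count + 1
* payload dwords -- hence 'idx += pkt.count + 2', while a type-2 packet
* is a single filler dword.
*/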
 
/*
* vm
*/
int si_vm_init(struct radeon_device *rdev)
{
/* number of VMs */
rdev->vm_manager.nvm = 16;
/* base offset of vram pages */
rdev->vm_manager.vram_base_offset = 0;
 
return 0;
}
 
void si_vm_fini(struct radeon_device *rdev)
{
}
 
/**
* si_vm_set_page - update the page tables using the CP
*
* @rdev: radeon_device pointer
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
* @incr: increase next addr by incr bytes
* @flags: access flags
*
* Update the page tables using the CP (cayman-si).
*/
void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
uint64_t addr, unsigned count,
uint32_t incr, uint32_t flags)
{
struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
 
while (count) {
unsigned ndw = 2 + count * 2;
if (ndw > 0x3FFE)
ndw = 0x3FFE;
 
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(1)));
radeon_ring_write(ring, pe);
radeon_ring_write(ring, upper_32_bits(pe));
for (; ndw > 2; ndw -= 2, --count, pe += 8) {
uint64_t value;
if (flags & RADEON_VM_PAGE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & RADEON_VM_PAGE_VALID)
value = addr;
else
value = 0;
addr += incr;
value |= r600_flags;
radeon_ring_write(ring, value);
radeon_ring_write(ring, upper_32_bits(value));
}
}
}
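
/* Worked example (illustrative): each WRITE_DATA packet above carries
* 2 + count * 2 dwords, clamped to 0x3FFE, i.e. at most
* (0x3FFE - 2) / 2 = 8190 page entries per packet; updating 10000 entries
* therefore takes two packets (8190 + 1810).
*/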
 
void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
{
struct radeon_ring *ring = &rdev->ring[ridx];
 
if (vm == NULL)
return;
 
/* write new base address */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
 
if (vm->id < 8) {
radeon_ring_write(ring,
(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2);
} else {
radeon_ring_write(ring,
(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2);
}
radeon_ring_write(ring, 0);
radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
 
/* flush hdp cache */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, HDP_MEM_COHERENCY_FLUSH_CNTL >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 0x1);
 
/* bits 0-15 are the VM contexts0-15 */
radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
WRITE_DATA_DST_SEL(0)));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm->id);
 
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
}
 
/*
* RLC
*/
void si_rlc_fini(struct radeon_device *rdev)
{
int r;
 
/* save restore block */
if (rdev->rlc.save_restore_obj) {
r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
if (unlikely(r != 0))
dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
radeon_bo_unpin(rdev->rlc.save_restore_obj);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
 
radeon_bo_unref(&rdev->rlc.save_restore_obj);
rdev->rlc.save_restore_obj = NULL;
}
 
/* clear state block */
if (rdev->rlc.clear_state_obj) {
r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
if (unlikely(r != 0))
dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
radeon_bo_unpin(rdev->rlc.clear_state_obj);
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
 
radeon_bo_unref(&rdev->rlc.clear_state_obj);
rdev->rlc.clear_state_obj = NULL;
}
}
 
int si_rlc_init(struct radeon_device *rdev)
{
int r;
 
/* save restore block */
if (rdev->rlc.save_restore_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL,
&rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
return r;
}
}
 
r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
if (unlikely(r != 0)) {
si_rlc_fini(rdev);
return r;
}
r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->rlc.save_restore_gpu_addr);
radeon_bo_unreserve(rdev->rlc.save_restore_obj);
if (r) {
dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
si_rlc_fini(rdev);
return r;
}
 
/* clear state block */
if (rdev->rlc.clear_state_obj == NULL) {
r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM, NULL,
&rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
si_rlc_fini(rdev);
return r;
}
}
r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
if (unlikely(r != 0)) {
si_rlc_fini(rdev);
return r;
}
r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
&rdev->rlc.clear_state_gpu_addr);
radeon_bo_unreserve(rdev->rlc.clear_state_obj);
if (r) {
dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
si_rlc_fini(rdev);
return r;
}
 
return 0;
}
 
static void si_rlc_stop(struct radeon_device *rdev)
{
WREG32(RLC_CNTL, 0);
}
 
static void si_rlc_start(struct radeon_device *rdev)
{
WREG32(RLC_CNTL, RLC_ENABLE);
}
 
static int si_rlc_resume(struct radeon_device *rdev)
{
u32 i;
const __be32 *fw_data;
 
if (!rdev->rlc_fw)
return -EINVAL;
 
si_rlc_stop(rdev);
 
WREG32(RLC_RL_BASE, 0);
WREG32(RLC_RL_SIZE, 0);
WREG32(RLC_LB_CNTL, 0);
WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
WREG32(RLC_LB_CNTR_INIT, 0);
 
WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);
 
WREG32(RLC_MC_CNTL, 0);
WREG32(RLC_UCODE_CNTL, 0);
 
fw_data = (const __be32 *)rdev->rlc_fw->data;
for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
WREG32(RLC_UCODE_ADDR, i);
WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
}
WREG32(RLC_UCODE_ADDR, 0);
 
si_rlc_start(rdev);
 
return 0;
}
 
static void si_enable_interrupts(struct radeon_device *rdev)
{
u32 ih_cntl = RREG32(IH_CNTL);
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
 
ih_cntl |= ENABLE_INTR;
ih_rb_cntl |= IH_RB_ENABLE;
WREG32(IH_CNTL, ih_cntl);
WREG32(IH_RB_CNTL, ih_rb_cntl);
rdev->ih.enabled = true;
}
 
static void si_disable_interrupts(struct radeon_device *rdev)
{
u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
u32 ih_cntl = RREG32(IH_CNTL);
 
ih_rb_cntl &= ~IH_RB_ENABLE;
ih_cntl &= ~ENABLE_INTR;
WREG32(IH_RB_CNTL, ih_rb_cntl);
WREG32(IH_CNTL, ih_cntl);
/* set rptr, wptr to 0 */
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
rdev->ih.enabled = false;
rdev->ih.rptr = 0;
}
 
static void si_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
 
WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(CP_INT_CNTL_RING1, 0);
WREG32(CP_INT_CNTL_RING2, 0);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
}
if (rdev->num_crtc >= 6) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
}
 
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
 
tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD1_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD2_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD3_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD4_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
WREG32(DC_HPD6_INT_CONTROL, tmp);
 
}
 
static int si_irq_init(struct radeon_device *rdev)
{
int ret = 0;
int rb_bufsz;
u32 interrupt_cntl, ih_cntl, ih_rb_cntl;
 
/* allocate ring */
ret = r600_ih_ring_alloc(rdev);
if (ret)
return ret;
 
/* disable irqs */
si_disable_interrupts(rdev);
 
/* init rlc */
ret = si_rlc_resume(rdev);
if (ret) {
r600_ih_ring_fini(rdev);
return ret;
}
 
/* setup interrupt control */
/* set dummy read address to ring address */
WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
interrupt_cntl = RREG32(INTERRUPT_CNTL);
/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
* IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
*/
interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
WREG32(INTERRUPT_CNTL, interrupt_cntl);
 
WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
rb_bufsz = drm_order(rdev->ih.ring_size / 4);
 
ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
IH_WPTR_OVERFLOW_CLEAR |
(rb_bufsz << 1));
 
if (rdev->wb.enabled)
ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;
 
/* set the writeback address whether it's enabled or not */
WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);
 
WREG32(IH_RB_CNTL, ih_rb_cntl);
 
/* set rptr, wptr to 0 */
WREG32(IH_RB_RPTR, 0);
WREG32(IH_RB_WPTR, 0);
 
/* Default settings for IH_CNTL (disabled at first) */
ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
/* RPTR_REARM only works if msi's are enabled */
if (rdev->msi_enabled)
ih_cntl |= RPTR_REARM;
WREG32(IH_CNTL, ih_cntl);
 
/* force the active interrupt state to all disabled */
si_disable_interrupt_state(rdev);
 
pci_set_master(rdev->pdev);
 
/* enable irqs */
si_enable_interrupts(rdev);
 
return ret;
}
 
int si_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
 
if (!rdev->irq.installed) {
WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
return -EINVAL;
}
/* don't enable anything if the ih is disabled */
if (!rdev->ih.enabled) {
si_disable_interrupts(rdev);
/* force the active interrupt state to all disabled */
si_disable_interrupt_state(rdev);
return 0;
}
 
hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
 
/* enable CP interrupts on all rings */
if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
DRM_DEBUG("si_irq_set: sw int gfx\n");
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
}
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
DRM_DEBUG("si_irq_set: sw int cp1\n");
cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
}
if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
DRM_DEBUG("si_irq_set: sw int cp2\n");
cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
}
if (rdev->irq.crtc_vblank_int[0] ||
atomic_read(&rdev->irq.pflip[0])) {
DRM_DEBUG("si_irq_set: vblank 0\n");
crtc1 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[1] ||
atomic_read(&rdev->irq.pflip[1])) {
DRM_DEBUG("si_irq_set: vblank 1\n");
crtc2 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[2] ||
atomic_read(&rdev->irq.pflip[2])) {
DRM_DEBUG("si_irq_set: vblank 2\n");
crtc3 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[3] ||
atomic_read(&rdev->irq.pflip[3])) {
DRM_DEBUG("si_irq_set: vblank 3\n");
crtc4 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[4] ||
atomic_read(&rdev->irq.pflip[4])) {
DRM_DEBUG("si_irq_set: vblank 4\n");
crtc5 |= VBLANK_INT_MASK;
}
if (rdev->irq.crtc_vblank_int[5] ||
atomic_read(&rdev->irq.pflip[5])) {
DRM_DEBUG("si_irq_set: vblank 5\n");
crtc6 |= VBLANK_INT_MASK;
}
if (rdev->irq.hpd[0]) {
DRM_DEBUG("si_irq_set: hpd 1\n");
hpd1 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[1]) {
DRM_DEBUG("si_irq_set: hpd 2\n");
hpd2 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[2]) {
DRM_DEBUG("si_irq_set: hpd 3\n");
hpd3 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[3]) {
DRM_DEBUG("si_irq_set: hpd 4\n");
hpd4 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[4]) {
DRM_DEBUG("si_irq_set: hpd 5\n");
hpd5 |= DC_HPDx_INT_EN;
}
if (rdev->irq.hpd[5]) {
DRM_DEBUG("si_irq_set: hpd 6\n");
hpd6 |= DC_HPDx_INT_EN;
}
 
WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);
 
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
 
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
if (rdev->num_crtc >= 4) {
WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
}
if (rdev->num_crtc >= 6) {
WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
}
 
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
if (rdev->num_crtc >= 4) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
}
if (rdev->num_crtc >= 6) {
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
}
 
WREG32(DC_HPD1_INT_CONTROL, hpd1);
WREG32(DC_HPD2_INT_CONTROL, hpd2);
WREG32(DC_HPD3_INT_CONTROL, hpd3);
WREG32(DC_HPD4_INT_CONTROL, hpd4);
WREG32(DC_HPD5_INT_CONTROL, hpd5);
WREG32(DC_HPD6_INT_CONTROL, hpd6);
 
return 0;
}
 
static inline void si_irq_ack(struct radeon_device *rdev)
{
u32 tmp;
 
rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
if (rdev->num_crtc >= 4) {
rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
}
if (rdev->num_crtc >= 6) {
rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
}
 
if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);
 
if (rdev->num_crtc >= 4) {
if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
}
 
if (rdev->num_crtc >= 6) {
if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
}
 
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
tmp = RREG32(DC_HPD1_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD1_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
tmp = RREG32(DC_HPD2_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD2_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
tmp = RREG32(DC_HPD3_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD3_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
tmp = RREG32(DC_HPD4_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD4_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
tmp = RREG32(DC_HPD5_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
}
 
static void si_irq_disable(struct radeon_device *rdev)
{
si_disable_interrupts(rdev);
/* Wait and acknowledge irq */
mdelay(1);
si_irq_ack(rdev);
si_disable_interrupt_state(rdev);
}
 
static void si_irq_suspend(struct radeon_device *rdev)
{
si_irq_disable(rdev);
si_rlc_stop(rdev);
}
 
static void si_irq_fini(struct radeon_device *rdev)
{
si_irq_suspend(rdev);
r600_ih_ring_fini(rdev);
}
 
static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
{
u32 wptr, tmp;
 
if (rdev->wb.enabled)
wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
else
wptr = RREG32(IH_RB_WPTR);
 
if (wptr & RB_OVERFLOW) {
/* When a ring buffer overflow happens, start parsing interrupts
 * from the last not-overwritten vector (wptr + 16). Hopefully
 * this should allow us to catch up.
 */
dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_WPTR_OVERFLOW_CLEAR;
WREG32(IH_RB_CNTL, tmp);
}
return (wptr & rdev->ih.ptr_mask);
}
 
/* SI IV Ring
* Each IV ring entry is 128 bits:
* [7:0] - interrupt source id
* [31:8] - reserved
* [59:32] - interrupt source data
* [63:60] - reserved
* [71:64] - RINGID
* [79:72] - VMID
* [127:80] - reserved
*/
int si_irq_process(struct radeon_device *rdev)
{
u32 wptr;
u32 rptr;
u32 src_id, src_data, ring_id;
u32 ring_index;
bool queue_hotplug = false;
 
if (!rdev->ih.enabled || rdev->shutdown)
return IRQ_NONE;
 
wptr = si_get_ih_wptr(rdev);
 
restart_ih:
/* is somebody else already processing irqs? */
if (atomic_xchg(&rdev->ih.lock, 1))
return IRQ_NONE;
 
rptr = rdev->ih.rptr;
DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
 
/* Order reading of wptr vs. reading of IH ring data */
rmb();
 
/* display interrupts */
si_irq_ack(rdev);
 
while (rptr != wptr) {
/* wptr/rptr are in bytes! */
ring_index = rptr / 4;
src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
 
switch (src_id) {
case 1: /* D1 vblank/vline */
switch (src_data) {
case 0: /* D1 vblank */
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[0]) {
// drm_handle_vblank(rdev->ddev, 0);
rdev->pm.vblank_sync = true;
// wake_up(&rdev->irq.vblank_queue);
}
// if (atomic_read(&rdev->irq.pflip[0]))
// radeon_crtc_handle_flip(rdev, 0);
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
break;
case 1: /* D1 vline */
if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
DRM_DEBUG("IH: D1 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 2: /* D2 vblank/vline */
switch (src_data) {
case 0: /* D2 vblank */
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[1]) {
// drm_handle_vblank(rdev->ddev, 1);
rdev->pm.vblank_sync = true;
// wake_up(&rdev->irq.vblank_queue);
}
// if (atomic_read(&rdev->irq.pflip[1]))
// radeon_crtc_handle_flip(rdev, 1);
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
break;
case 1: /* D2 vline */
if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
DRM_DEBUG("IH: D2 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 3: /* D3 vblank/vline */
switch (src_data) {
case 0: /* D3 vblank */
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[2]) {
// drm_handle_vblank(rdev->ddev, 2);
rdev->pm.vblank_sync = true;
// wake_up(&rdev->irq.vblank_queue);
}
// if (atomic_read(&rdev->irq.pflip[2]))
// radeon_crtc_handle_flip(rdev, 2);
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D3 vblank\n");
}
break;
case 1: /* D3 vline */
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
DRM_DEBUG("IH: D3 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 4: /* D4 vblank/vline */
switch (src_data) {
case 0: /* D4 vblank */
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[3]) {
// drm_handle_vblank(rdev->ddev, 3);
rdev->pm.vblank_sync = true;
// wake_up(&rdev->irq.vblank_queue);
}
// if (atomic_read(&rdev->irq.pflip[3]))
// radeon_crtc_handle_flip(rdev, 3);
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D4 vblank\n");
}
break;
case 1: /* D4 vline */
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
DRM_DEBUG("IH: D4 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 5: /* D5 vblank/vline */
switch (src_data) {
case 0: /* D5 vblank */
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[4]) {
// drm_handle_vblank(rdev->ddev, 4);
rdev->pm.vblank_sync = true;
// wake_up(&rdev->irq.vblank_queue);
}
// if (atomic_read(&rdev->irq.pflip[4]))
// radeon_crtc_handle_flip(rdev, 4);
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D5 vblank\n");
}
break;
case 1: /* D5 vline */
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
DRM_DEBUG("IH: D5 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 6: /* D6 vblank/vline */
switch (src_data) {
case 0: /* D6 vblank */
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
if (rdev->irq.crtc_vblank_int[5]) {
// drm_handle_vblank(rdev->ddev, 5);
rdev->pm.vblank_sync = true;
// wake_up(&rdev->irq.vblank_queue);
}
// if (atomic_read(&rdev->irq.pflip[5]))
// radeon_crtc_handle_flip(rdev, 5);
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D6 vblank\n");
}
break;
case 1: /* D6 vline */
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
DRM_DEBUG("IH: D6 vline\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 42: /* HPD hotplug */
switch (src_data) {
case 0:
if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD1\n");
}
break;
case 1:
if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD2\n");
}
break;
case 2:
if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD3\n");
}
break;
case 3:
if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD4\n");
}
break;
case 4:
if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 5:
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
break;
case 176: /* RINGID0 CP_INT */
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 177: /* RINGID1 CP_INT */
radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
break;
case 178: /* RINGID2 CP_INT */
radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
switch (ring_id) {
case 0:
radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 1:
radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
break;
case 2:
radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
break;
}
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
break;
default:
DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
break;
}
 
/* wptr/rptr are in bytes! */
rptr += 16;
rptr &= rdev->ih.ptr_mask;
}
// if (queue_hotplug)
// schedule_work(&rdev->hotplug_work);
rdev->ih.rptr = rptr;
WREG32(IH_RB_RPTR, rdev->ih.rptr);
atomic_set(&rdev->ih.lock, 0);
 
/* make sure wptr hasn't changed while processing */
wptr = si_get_ih_wptr(rdev);
if (wptr != rptr)
goto restart_ih;
 
return IRQ_HANDLED;
}
 
/*
* startup/shutdown callbacks
*/
static int si_startup(struct radeon_device *rdev)
{
struct radeon_ring *ring;
int r;
 
if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
!rdev->rlc_fw || !rdev->mc_fw) {
r = si_init_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load firmware!\n");
return r;
}
}
 
r = si_mc_load_microcode(rdev);
if (r) {
DRM_ERROR("Failed to load MC firmware!\n");
return r;
}
 
r = r600_vram_scratch_init(rdev);
if (r)
return r;
 
si_mc_program(rdev);
r = si_pcie_gart_enable(rdev);
if (r)
return r;
si_gpu_init(rdev);
 
#if 0
r = evergreen_blit_init(rdev);
if (r) {
r600_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
#endif
/* allocate rlc buffers */
r = si_rlc_init(rdev);
if (r) {
DRM_ERROR("Failed to init rlc BOs!\n");
return r;
}
 
/* allocate wb buffer */
r = radeon_wb_init(rdev);
if (r)
return r;
 
r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
return r;
}
 
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
if (r) {
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
return r;
}
 
r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
if (r) {
dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
return r;
}
 
/* Enable IRQ */
r = si_irq_init(rdev);
if (r) {
DRM_ERROR("radeon: IH init failed (%d).\n", r);
radeon_irq_kms_fini(rdev);
return r;
}
si_irq_set(rdev);
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
CP_RB0_RPTR, CP_RB0_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
CP_RB1_RPTR, CP_RB1_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
CP_RB2_RPTR, CP_RB2_WPTR,
0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
 
r = si_cp_load_microcode(rdev);
if (r)
return r;
r = si_cp_resume(rdev);
if (r)
return r;
 
// r = radeon_ib_pool_init(rdev);
// if (r) {
// dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
// return r;
// }
 
// r = radeon_vm_manager_init(rdev);
// if (r) {
// dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
// return r;
// }
 
return 0;
}
 
 
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call asic-specific functions. This should
 * also allow us to remove a bunch of callback functions like
 * vram_info.
 */
int si_init(struct radeon_device *rdev)
{
struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
 
ENTER();
 
/* Read BIOS */
if (!radeon_get_bios(rdev)) {
if (ASIC_IS_AVIVO(rdev))
return -EINVAL;
}
/* Must be an ATOMBIOS */
if (!rdev->is_atom_bios) {
dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
return -EINVAL;
}
r = radeon_atombios_init(rdev);
if (r)
return r;
 
/* Post card if necessary */
if (!radeon_card_posted(rdev)) {
if (!rdev->bios) {
dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
return -EINVAL;
}
DRM_INFO("GPU not posted. posting now...\n");
atom_asic_init(rdev->mode_info.atom_context);
}
/* Initialize scratch registers */
si_scratch_init(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
/* Initialize clocks */
radeon_get_clock_info(rdev->ddev);
 
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
return r;
 
/* initialize memory controller */
r = si_mc_init(rdev);
if (r)
return r;
/* Memory manager */
r = radeon_bo_init(rdev);
if (r)
return r;
 
r = radeon_irq_kms_init(rdev);
if (r)
return r;
 
ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
 
ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
 
ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
ring->ring_obj = NULL;
r600_ring_init(rdev, ring, 1024 * 1024);
 
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
 
r = r600_pcie_gart_init(rdev);
if (r)
return r;
 
rdev->accel_working = true;
r = si_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
// si_cp_fini(rdev);
// si_irq_fini(rdev);
// si_rlc_fini(rdev);
// radeon_wb_fini(rdev);
// radeon_ib_pool_fini(rdev);
// radeon_vm_manager_fini(rdev);
// radeon_irq_kms_fini(rdev);
// si_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
 
/* Don't start up if the MC ucode is missing.
* The default clocks and voltages before the MC ucode
* is loaded are not sufficient for advanced operations.
*/
if (!rdev->mc_fw) {
DRM_ERROR("radeon: MC ucode required for NI+.\n");
return -EINVAL;
}
LEAVE();
 
return 0;
}
 
/**
* si_get_gpu_clock - return GPU clock counter snapshot
*
* @rdev: radeon_device pointer
*
* Fetches a GPU clock counter snapshot (SI).
* Returns the 64-bit clock counter snapshot.
*/
uint64_t si_get_gpu_clock(struct radeon_device *rdev)
{
uint64_t clock;
 
mutex_lock(&rdev->gpu_clock_mutex);
WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
mutex_unlock(&rdev->gpu_clock_mutex);
return clock;
}
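/* Usage sketch (not in the original source): two snapshots of the RLC
 * clock counter bracket a workload, and the difference is the number
 * of elapsed GPU clock ticks. The wrapper name is illustrative.
 */
static u64 si_measure_gpu_clocks(struct radeon_device *rdev)
{
	u64 start = si_get_gpu_clock(rdev);

	/* ... submit the work being measured and wait for it here ... */

	return si_get_gpu_clock(rdev) - start;
}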
/drivers/video/drm/radeon/si_blit_shaders.c
0,0 → 1,253
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Alex Deucher <alexander.deucher@amd.com>
*/
 
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>
 
const u32 si_default_state[] =
{
0xc0066900,
0x00000000,
0x00000060, /* DB_RENDER_CONTROL */
0x00000000, /* DB_COUNT_CONTROL */
0x00000000, /* DB_DEPTH_VIEW */
0x0000002a, /* DB_RENDER_OVERRIDE */
0x00000000, /* DB_RENDER_OVERRIDE2 */
0x00000000, /* DB_HTILE_DATA_BASE */
 
0xc0046900,
0x00000008,
0x00000000, /* DB_DEPTH_BOUNDS_MIN */
0x00000000, /* DB_DEPTH_BOUNDS_MAX */
0x00000000, /* DB_STENCIL_CLEAR */
0x00000000, /* DB_DEPTH_CLEAR */
 
0xc0036900,
0x0000000f,
0x00000000, /* DB_DEPTH_INFO */
0x00000000, /* DB_Z_INFO */
0x00000000, /* DB_STENCIL_INFO */
 
0xc0016900,
0x00000080,
0x00000000, /* PA_SC_WINDOW_OFFSET */
 
0xc00d6900,
0x00000083,
0x0000ffff, /* PA_SC_CLIPRECT_RULE */
0x00000000, /* PA_SC_CLIPRECT_0_TL */
0x20002000, /* PA_SC_CLIPRECT_0_BR */
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0x00000000,
0x20002000,
0xaaaaaaaa, /* PA_SC_EDGERULE */
0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
0x0000000f, /* CB_TARGET_MASK */
0x0000000f, /* CB_SHADER_MASK */
 
0xc0226900,
0x00000094,
0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x80000000,
0x20002000,
0x00000000, /* PA_SC_VPORT_ZMIN_0 */
0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
 
0xc0026900,
0x000000d9,
0x00000000, /* CP_RINGID */
0x00000000, /* CP_VMID */
 
0xc0046900,
0x00000100,
0xffffffff, /* VGT_MAX_VTX_INDX */
0x00000000, /* VGT_MIN_VTX_INDX */
0x00000000, /* VGT_INDX_OFFSET */
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
 
0xc0046900,
0x00000105,
0x00000000, /* CB_BLEND_RED */
0x00000000, /* CB_BLEND_GREEN */
0x00000000, /* CB_BLEND_BLUE */
0x00000000, /* CB_BLEND_ALPHA */
 
0xc0016900,
0x000001e0,
0x00000000, /* CB_BLEND0_CONTROL */
 
0xc00e6900,
0x00000200,
0x00000000, /* DB_DEPTH_CONTROL */
0x00000000, /* DB_EQAA */
0x00cc0010, /* CB_COLOR_CONTROL */
0x00000210, /* DB_SHADER_CONTROL */
0x00010000, /* PA_CL_CLIP_CNTL */
0x00000004, /* PA_SU_SC_MODE_CNTL */
0x00000100, /* PA_CL_VTE_CNTL */
0x00000000, /* PA_CL_VS_OUT_CNTL */
0x00000000, /* PA_CL_NANINF_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
0x00000000, /* */
0x00000000, /* */
 
0xc0116900,
0x00000280,
0x00000000, /* PA_SU_POINT_SIZE */
0x00000000, /* PA_SU_POINT_MINMAX */
0x00000008, /* PA_SU_LINE_CNTL */
0x00000000, /* PA_SC_LINE_STIPPLE */
0x00000000, /* VGT_OUTPUT_PATH_CNTL */
0x00000000, /* VGT_HOS_CNTL */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000, /* VGT_GS_MODE */
 
0xc0026900,
0x00000292,
0x00000000, /* PA_SC_MODE_CNTL_0 */
0x00000000, /* PA_SC_MODE_CNTL_1 */
 
0xc0016900,
0x000002a1,
0x00000000, /* VGT_PRIMITIVEID_EN */
 
0xc0016900,
0x000002a5,
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
 
0xc0026900,
0x000002a8,
0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
0x00000000,
 
0xc0026900,
0x000002ad,
0x00000000, /* VGT_REUSE_OFF */
0x00000000,
 
0xc0016900,
0x000002d5,
0x00000000, /* VGT_SHADER_STAGES_EN */
 
0xc0016900,
0x000002dc,
0x0000aa00, /* DB_ALPHA_TO_MASK */
 
0xc0066900,
0x000002de,
0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
 
0xc0026900,
0x000002e5,
0x00000000, /* VGT_STRMOUT_CONFIG */
0x00000000,
 
0xc01b6900,
0x000002f5,
0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
0x00000000, /* PA_SC_LINE_CNTL */
0x00000000, /* PA_SC_AA_CONFIG */
0x00000005, /* PA_SU_VTX_CNTL */
0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0x00000000,
0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
0xffffffff,
 
0xc0026900,
0x00000316,
0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
0x00000010, /* */
};
 
const u32 si_default_size = ARRAY_SIZE(si_default_state);
/drivers/video/drm/radeon/si_blit_shaders.h
0,0 → 1,32
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef SI_BLIT_SHADERS_H
#define SI_BLIT_SHADERS_H
 
extern const u32 si_default_state[];
 
extern const u32 si_default_size;
 
#endif
/drivers/video/drm/radeon/si_reg.h
0,0 → 1,105
/*
* Copyright 2010 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#ifndef __SI_REG_H__
#define __SI_REG_H__
 
/* SI */
#define SI_DC_GPIO_HPD_MASK 0x65b0
#define SI_DC_GPIO_HPD_A 0x65b4
#define SI_DC_GPIO_HPD_EN 0x65b8
#define SI_DC_GPIO_HPD_Y 0x65bc
 
#define SI_GRPH_CONTROL 0x6804
# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
# define SI_GRPH_DEPTH_8BPP 0
# define SI_GRPH_DEPTH_16BPP 1
# define SI_GRPH_DEPTH_32BPP 2
# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
# define SI_ADDR_SURF_2_BANK 0
# define SI_ADDR_SURF_4_BANK 1
# define SI_ADDR_SURF_8_BANK 2
# define SI_ADDR_SURF_16_BANK 3
# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
# define SI_ADDR_SURF_BANK_WIDTH_1 0
# define SI_ADDR_SURF_BANK_WIDTH_2 1
# define SI_ADDR_SURF_BANK_WIDTH_4 2
# define SI_ADDR_SURF_BANK_WIDTH_8 3
# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define SI_GRPH_FORMAT_INDEXED 0
/* 16 BPP */
# define SI_GRPH_FORMAT_ARGB1555 0
# define SI_GRPH_FORMAT_ARGB565 1
# define SI_GRPH_FORMAT_ARGB4444 2
# define SI_GRPH_FORMAT_AI88 3
# define SI_GRPH_FORMAT_MONO16 4
# define SI_GRPH_FORMAT_BGRA5551 5
/* 32 BPP */
# define SI_GRPH_FORMAT_ARGB8888 0
# define SI_GRPH_FORMAT_ARGB2101010 1
# define SI_GRPH_FORMAT_32BPP_DIG 2
# define SI_GRPH_FORMAT_8B_ARGB2101010 3
# define SI_GRPH_FORMAT_BGRA1010102 4
# define SI_GRPH_FORMAT_8B_BGRA1010102 5
# define SI_GRPH_FORMAT_RGB111110 6
# define SI_GRPH_FORMAT_BGR101111 7
# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
# define SI_ADDR_SURF_BANK_HEIGHT_1 0
# define SI_ADDR_SURF_BANK_HEIGHT_2 1
# define SI_ADDR_SURF_BANK_HEIGHT_4 2
# define SI_ADDR_SURF_BANK_HEIGHT_8 3
# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
# define SI_ADDR_SURF_TILE_SPLIT_64B 0
# define SI_ADDR_SURF_TILE_SPLIT_128B 1
# define SI_ADDR_SURF_TILE_SPLIT_256B 2
# define SI_ADDR_SURF_TILE_SPLIT_512B 3
# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
# define SI_ADDR_SURF_P2 0
# define SI_ADDR_SURF_P4_8x16 4
# define SI_ADDR_SURF_P4_16x16 5
# define SI_ADDR_SURF_P4_16x32 6
# define SI_ADDR_SURF_P4_32x32 7
# define SI_ADDR_SURF_P8_16x16_8x16 8
# define SI_ADDR_SURF_P8_16x32_8x16 9
# define SI_ADDR_SURF_P8_32x32_8x16 10
# define SI_ADDR_SURF_P8_16x32_16x16 11
# define SI_ADDR_SURF_P8_32x32_16x16 12
# define SI_ADDR_SURF_P8_32x32_16x32 13
# define SI_ADDR_SURF_P8_32x64_32x32 14
 
#endif
/drivers/video/drm/radeon/sid.h
0,0 → 1,924
/*
* Copyright 2011 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Alex Deucher
*/
#ifndef SI_H
#define SI_H
 
#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
 
#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
 
#define CG_MULT_THERMAL_STATUS 0x714
#define ASIC_MAX_TEMP(x) ((x) << 0)
#define ASIC_MAX_TEMP_MASK 0x000001ff
#define ASIC_MAX_TEMP_SHIFT 0
#define CTF_TEMP(x) ((x) << 9)
#define CTF_TEMP_MASK 0x0003fe00
#define CTF_TEMP_SHIFT 9
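/* Illustrative sketch (no such helper in this header): decoding the
 * current temperature from CG_MULT_THERMAL_STATUS. The out-of-range
 * handling mirrors the pattern used for the evergreen family and is
 * an assumption for SI here.
 */
static inline int si_read_temp_millicelsius(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		   CTF_TEMP_SHIFT;
	/* 9-bit field; bit 0x200 flags an out-of-range reading */
	int actual_temp = (temp & 0x200) ? 255 : (temp & 0x1ff);

	return actual_temp * 1000; /* millidegrees Celsius */
}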
 
#define SI_MAX_SH_GPRS 256
#define SI_MAX_TEMP_GPRS 16
#define SI_MAX_SH_THREADS 256
#define SI_MAX_SH_STACK_ENTRIES 4096
#define SI_MAX_FRC_EOV_CNT 16384
#define SI_MAX_BACKENDS 8
#define SI_MAX_BACKENDS_MASK 0xFF
#define SI_MAX_BACKENDS_PER_SE_MASK 0x0F
#define SI_MAX_SIMDS 12
#define SI_MAX_SIMDS_MASK 0x0FFF
#define SI_MAX_SIMDS_PER_SE_MASK 0x00FF
#define SI_MAX_PIPES 8
#define SI_MAX_PIPES_MASK 0xFF
#define SI_MAX_PIPES_PER_SIMD_MASK 0x3F
#define SI_MAX_LDS_NUM 0xFFFF
#define SI_MAX_TCC 16
#define SI_MAX_TCC_MASK 0xFFFF
 
#define VGA_HDP_CONTROL 0x328
#define VGA_MEMORY_DISABLE (1 << 4)
 
#define DMIF_ADDR_CONFIG 0xBD4
 
#define SRBM_STATUS 0xE50
 
#define CC_SYS_RB_BACKEND_DISABLE 0xe80
#define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84
 
#define VM_L2_CNTL 0x1400
#define ENABLE_L2_CACHE (1 << 0)
#define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1)
#define L2_CACHE_PTE_ENDIAN_SWAP_MODE(x) ((x) << 2)
#define L2_CACHE_PDE_ENDIAN_SWAP_MODE(x) ((x) << 4)
#define ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE (1 << 9)
#define ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE (1 << 10)
#define EFFECTIVE_L2_QUEUE_SIZE(x) (((x) & 7) << 15)
#define CONTEXT1_IDENTITY_ACCESS_MODE(x) (((x) & 3) << 19)
#define VM_L2_CNTL2 0x1404
#define INVALIDATE_ALL_L1_TLBS (1 << 0)
#define INVALIDATE_L2_CACHE (1 << 1)
#define INVALIDATE_CACHE_MODE(x) ((x) << 26)
#define INVALIDATE_PTE_AND_PDE_CACHES 0
#define INVALIDATE_ONLY_PTE_CACHES 1
#define INVALIDATE_ONLY_PDE_CACHES 2
#define VM_L2_CNTL3 0x1408
#define BANK_SELECT(x) ((x) << 0)
#define L2_CACHE_UPDATE_MODE(x) ((x) << 6)
#define L2_CACHE_BIGK_FRAGMENT_SIZE(x) ((x) << 15)
#define L2_CACHE_BIGK_ASSOCIATIVITY (1 << 20)
#define VM_L2_STATUS 0x140C
#define L2_BUSY (1 << 0)
#define VM_CONTEXT0_CNTL 0x1410
#define ENABLE_CONTEXT (1 << 0)
#define PAGE_TABLE_DEPTH(x) (((x) & 3) << 1)
#define RANGE_PROTECTION_FAULT_ENABLE_DEFAULT (1 << 4)
#define VM_CONTEXT1_CNTL 0x1414
#define VM_CONTEXT0_CNTL2 0x1430
#define VM_CONTEXT1_CNTL2 0x1434
#define VM_CONTEXT8_PAGE_TABLE_BASE_ADDR 0x1438
#define VM_CONTEXT9_PAGE_TABLE_BASE_ADDR 0x143c
#define VM_CONTEXT10_PAGE_TABLE_BASE_ADDR 0x1440
#define VM_CONTEXT11_PAGE_TABLE_BASE_ADDR 0x1444
#define VM_CONTEXT12_PAGE_TABLE_BASE_ADDR 0x1448
#define VM_CONTEXT13_PAGE_TABLE_BASE_ADDR 0x144c
#define VM_CONTEXT14_PAGE_TABLE_BASE_ADDR 0x1450
#define VM_CONTEXT15_PAGE_TABLE_BASE_ADDR 0x1454
 
#define VM_INVALIDATE_REQUEST 0x1478
#define VM_INVALIDATE_RESPONSE 0x147c
 
#define VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR 0x1518
#define VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR 0x151c
 
#define VM_CONTEXT0_PAGE_TABLE_BASE_ADDR 0x153c
#define VM_CONTEXT1_PAGE_TABLE_BASE_ADDR 0x1540
#define VM_CONTEXT2_PAGE_TABLE_BASE_ADDR 0x1544
#define VM_CONTEXT3_PAGE_TABLE_BASE_ADDR 0x1548
#define VM_CONTEXT4_PAGE_TABLE_BASE_ADDR 0x154c
#define VM_CONTEXT5_PAGE_TABLE_BASE_ADDR 0x1550
#define VM_CONTEXT6_PAGE_TABLE_BASE_ADDR 0x1554
#define VM_CONTEXT7_PAGE_TABLE_BASE_ADDR 0x1558
#define VM_CONTEXT0_PAGE_TABLE_START_ADDR 0x155c
#define VM_CONTEXT1_PAGE_TABLE_START_ADDR 0x1560
 
#define VM_CONTEXT0_PAGE_TABLE_END_ADDR 0x157C
#define VM_CONTEXT1_PAGE_TABLE_END_ADDR 0x1580
 
#define MC_SHARED_CHMAP 0x2004
#define NOOFCHAN_SHIFT 12
#define NOOFCHAN_MASK 0x0000f000
#define MC_SHARED_CHREMAP 0x2008
 
#define MC_VM_FB_LOCATION 0x2024
#define MC_VM_AGP_TOP 0x2028
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
 
#define MC_VM_MX_L1_TLB_CNTL 0x2064
#define ENABLE_L1_TLB (1 << 0)
#define ENABLE_L1_FRAGMENT_PROCESSING (1 << 1)
#define SYSTEM_ACCESS_MODE_PA_ONLY (0 << 3)
#define SYSTEM_ACCESS_MODE_USE_SYS_MAP (1 << 3)
#define SYSTEM_ACCESS_MODE_IN_SYS (2 << 3)
#define SYSTEM_ACCESS_MODE_NOT_IN_SYS (3 << 3)
#define SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU (0 << 5)
#define ENABLE_ADVANCED_DRIVER_MODEL (1 << 6)
 
#define MC_SHARED_BLACKOUT_CNTL 0x20ac
 
#define MC_ARB_RAMCFG 0x2760
#define NOOFBANK_SHIFT 0
#define NOOFBANK_MASK 0x00000003
#define NOOFRANK_SHIFT 2
#define NOOFRANK_MASK 0x00000004
#define NOOFROWS_SHIFT 3
#define NOOFROWS_MASK 0x00000038
#define NOOFCOLS_SHIFT 6
#define NOOFCOLS_MASK 0x000000C0
#define CHANSIZE_SHIFT 8
#define CHANSIZE_MASK 0x00000100
#define CHANSIZE_OVERRIDE (1 << 11)
#define NOOFGROUPS_SHIFT 12
#define NOOFGROUPS_MASK 0x00001000
 
#define MC_SEQ_TRAIN_WAKEUP_CNTL 0x2808
#define TRAIN_DONE_D0 (1 << 30)
#define TRAIN_DONE_D1 (1 << 31)
 
#define MC_SEQ_SUP_CNTL 0x28c8
#define RUN_MASK (1 << 0)
#define MC_SEQ_SUP_PGM 0x28cc
 
#define MC_IO_PAD_CNTL_D0 0x29d0
#define MEM_FALL_OUT_CMD (1 << 8)
 
#define MC_SEQ_IO_DEBUG_INDEX 0x2a44
#define MC_SEQ_IO_DEBUG_DATA 0x2a48
 
#define HDP_HOST_PATH_CNTL 0x2C00
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
 
#define HDP_ADDR_CONFIG 0x2F48
#define HDP_MISC_CNTL 0x2F4C
#define HDP_FLUSH_INVALIDATE_CACHE (1 << 0)
 
#define IH_RB_CNTL 0x3e00
# define IH_RB_ENABLE (1 << 0)
# define IH_IB_SIZE(x) ((x) << 1) /* log2 */
# define IH_RB_FULL_DRAIN_ENABLE (1 << 6)
# define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
# define IH_WPTR_WRITEBACK_TIMER(x) ((x) << 9) /* log2 */
# define IH_WPTR_OVERFLOW_ENABLE (1 << 16)
# define IH_WPTR_OVERFLOW_CLEAR (1 << 31)
#define IH_RB_BASE 0x3e04
#define IH_RB_RPTR 0x3e08
#define IH_RB_WPTR 0x3e0c
# define RB_OVERFLOW (1 << 0)
# define WPTR_OFFSET_MASK 0x3fffc
#define IH_RB_WPTR_ADDR_HI 0x3e10
#define IH_RB_WPTR_ADDR_LO 0x3e14
#define IH_CNTL 0x3e18
# define ENABLE_INTR (1 << 0)
# define IH_MC_SWAP(x) ((x) << 1)
# define IH_MC_SWAP_NONE 0
# define IH_MC_SWAP_16BIT 1
# define IH_MC_SWAP_32BIT 2
# define IH_MC_SWAP_64BIT 3
# define RPTR_REARM (1 << 4)
# define MC_WRREQ_CREDIT(x) ((x) << 15)
# define MC_WR_CLEAN_CNT(x) ((x) << 20)
# define MC_VMID(x) ((x) << 25)
 
#define CONFIG_MEMSIZE 0x5428
 
#define INTERRUPT_CNTL 0x5468
# define IH_DUMMY_RD_OVERRIDE (1 << 0)
# define IH_DUMMY_RD_EN (1 << 1)
# define IH_REQ_NONSNOOP_EN (1 << 3)
# define GEN_IH_INT_EN (1 << 8)
#define INTERRUPT_CNTL2 0x546c
 
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
 
#define BIF_FB_EN 0x5490
#define FB_READ_EN (1 << 0)
#define FB_WRITE_EN (1 << 1)
 
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
 
#define DC_LB_MEMORY_SPLIT 0x6b0c
#define DC_LB_MEMORY_CONFIG(x) ((x) << 20)
 
#define PRIORITY_A_CNT 0x6b18
#define PRIORITY_MARK_MASK 0x7fff
#define PRIORITY_OFF (1 << 16)
#define PRIORITY_ALWAYS_ON (1 << 20)
#define PRIORITY_B_CNT 0x6b1c
 
#define DPG_PIPE_ARBITRATION_CONTROL3 0x6cc8
# define LATENCY_WATERMARK_MASK(x) ((x) << 16)
#define DPG_PIPE_LATENCY_CONTROL 0x6ccc
# define LATENCY_LOW_WATERMARK(x) ((x) << 0)
# define LATENCY_HIGH_WATERMARK(x) ((x) << 16)
 
/* 0x6bb8, 0x77b8, 0x103b8, 0x10fb8, 0x11bb8, 0x127b8 */
#define VLINE_STATUS 0x6bb8
# define VLINE_OCCURRED (1 << 0)
# define VLINE_ACK (1 << 4)
# define VLINE_STAT (1 << 12)
# define VLINE_INTERRUPT (1 << 16)
# define VLINE_INTERRUPT_TYPE (1 << 17)
/* 0x6bbc, 0x77bc, 0x103bc, 0x10fbc, 0x11bbc, 0x127bc */
#define VBLANK_STATUS 0x6bbc
# define VBLANK_OCCURRED (1 << 0)
# define VBLANK_ACK (1 << 4)
# define VBLANK_STAT (1 << 12)
# define VBLANK_INTERRUPT (1 << 16)
# define VBLANK_INTERRUPT_TYPE (1 << 17)
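/* Sketch (helper not in the original header): the instance lists above
 * are the base register plus a per-CRTC display offset. A hypothetical
 * accessor derived directly from those addresses; the caller must pass
 * 0 <= crtc < 6.
 */
static inline u32 si_vblank_status_reg(int crtc)
{
	static const u32 crtc_offsets[] = {
		0x0, 0xc00, 0x9800, 0xa400, 0xb000, 0xbc00
	};
	return VBLANK_STATUS + crtc_offsets[crtc];
}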
 
/* 0x6b40, 0x7740, 0x10340, 0x10f40, 0x11b40, 0x12740 */
#define INT_MASK 0x6b40
# define VBLANK_INT_MASK (1 << 0)
# define VLINE_INT_MASK (1 << 4)
 
#define DISP_INTERRUPT_STATUS 0x60f4
# define LB_D1_VLINE_INTERRUPT (1 << 2)
# define LB_D1_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD1_INTERRUPT (1 << 17)
# define DC_HPD1_RX_INTERRUPT (1 << 18)
# define DACA_AUTODETECT_INTERRUPT (1 << 22)
# define DACB_AUTODETECT_INTERRUPT (1 << 23)
# define DC_I2C_SW_DONE_INTERRUPT (1 << 24)
# define DC_I2C_HW_DONE_INTERRUPT (1 << 25)
#define DISP_INTERRUPT_STATUS_CONTINUE 0x60f8
# define LB_D2_VLINE_INTERRUPT (1 << 2)
# define LB_D2_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD2_INTERRUPT (1 << 17)
# define DC_HPD2_RX_INTERRUPT (1 << 18)
# define DISP_TIMER_INTERRUPT (1 << 24)
#define DISP_INTERRUPT_STATUS_CONTINUE2 0x60fc
# define LB_D3_VLINE_INTERRUPT (1 << 2)
# define LB_D3_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD3_INTERRUPT (1 << 17)
# define DC_HPD3_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE3 0x6100
# define LB_D4_VLINE_INTERRUPT (1 << 2)
# define LB_D4_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD4_INTERRUPT (1 << 17)
# define DC_HPD4_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE4 0x614c
# define LB_D5_VLINE_INTERRUPT (1 << 2)
# define LB_D5_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD5_INTERRUPT (1 << 17)
# define DC_HPD5_RX_INTERRUPT (1 << 18)
#define DISP_INTERRUPT_STATUS_CONTINUE5 0x6150
# define LB_D6_VLINE_INTERRUPT (1 << 2)
# define LB_D6_VBLANK_INTERRUPT (1 << 3)
# define DC_HPD6_INTERRUPT (1 << 17)
# define DC_HPD6_RX_INTERRUPT (1 << 18)
 
/* 0x6858, 0x7458, 0x10058, 0x10c58, 0x11858, 0x12458 */
#define GRPH_INT_STATUS 0x6858
# define GRPH_PFLIP_INT_OCCURRED (1 << 0)
# define GRPH_PFLIP_INT_CLEAR (1 << 8)
/* 0x685c, 0x745c, 0x1005c, 0x10c5c, 0x1185c, 0x1245c */
#define GRPH_INT_CONTROL 0x685c
# define GRPH_PFLIP_INT_MASK (1 << 0)
# define GRPH_PFLIP_INT_TYPE (1 << 8)
 
#define DACA_AUTODETECT_INT_CONTROL 0x66c8
 
#define DC_HPD1_INT_STATUS 0x601c
#define DC_HPD2_INT_STATUS 0x6028
#define DC_HPD3_INT_STATUS 0x6034
#define DC_HPD4_INT_STATUS 0x6040
#define DC_HPD5_INT_STATUS 0x604c
#define DC_HPD6_INT_STATUS 0x6058
# define DC_HPDx_INT_STATUS (1 << 0)
# define DC_HPDx_SENSE (1 << 1)
# define DC_HPDx_RX_INT_STATUS (1 << 8)
 
#define DC_HPD1_INT_CONTROL 0x6020
#define DC_HPD2_INT_CONTROL 0x602c
#define DC_HPD3_INT_CONTROL 0x6038
#define DC_HPD4_INT_CONTROL 0x6044
#define DC_HPD5_INT_CONTROL 0x6050
#define DC_HPD6_INT_CONTROL 0x605c
# define DC_HPDx_INT_ACK (1 << 0)
# define DC_HPDx_INT_POLARITY (1 << 8)
# define DC_HPDx_INT_EN (1 << 16)
# define DC_HPDx_RX_INT_ACK (1 << 20)
# define DC_HPDx_RX_INT_EN (1 << 24)
 
#define DC_HPD1_CONTROL 0x6024
#define DC_HPD2_CONTROL 0x6030
#define DC_HPD3_CONTROL 0x603c
#define DC_HPD4_CONTROL 0x6048
#define DC_HPD5_CONTROL 0x6054
#define DC_HPD6_CONTROL 0x6060
# define DC_HPDx_CONNECTION_TIMER(x) ((x) << 0)
# define DC_HPDx_RX_INT_TIMER(x) ((x) << 16)
# define DC_HPDx_EN (1 << 28)
 
/* 0x6e98, 0x7a98, 0x10698, 0x11298, 0x11e98, 0x12a98 */
#define CRTC_STATUS_FRAME_COUNT 0x6e98
 
#define GRBM_CNTL 0x8000
#define GRBM_READ_TIMEOUT(x) ((x) << 0)
 
#define GRBM_STATUS2 0x8008
#define RLC_RQ_PENDING (1 << 0)
#define RLC_BUSY (1 << 8)
#define TC_BUSY (1 << 9)
 
#define GRBM_STATUS 0x8010
#define CMDFIFO_AVAIL_MASK 0x0000000F
#define RING2_RQ_PENDING (1 << 4)
#define SRBM_RQ_PENDING (1 << 5)
#define RING1_RQ_PENDING (1 << 6)
#define CF_RQ_PENDING (1 << 7)
#define PF_RQ_PENDING (1 << 8)
#define GDS_DMA_RQ_PENDING (1 << 9)
#define GRBM_EE_BUSY (1 << 10)
#define DB_CLEAN (1 << 12)
#define CB_CLEAN (1 << 13)
#define TA_BUSY (1 << 14)
#define GDS_BUSY (1 << 15)
#define VGT_BUSY (1 << 17)
#define IA_BUSY_NO_DMA (1 << 18)
#define IA_BUSY (1 << 19)
#define SX_BUSY (1 << 20)
#define SPI_BUSY (1 << 22)
#define BCI_BUSY (1 << 23)
#define SC_BUSY (1 << 24)
#define PA_BUSY (1 << 25)
#define DB_BUSY (1 << 26)
#define CP_COHERENCY_BUSY (1 << 28)
#define CP_BUSY (1 << 29)
#define CB_BUSY (1 << 30)
#define GUI_ACTIVE (1 << 31)
#define GRBM_STATUS_SE0 0x8014
#define GRBM_STATUS_SE1 0x8018
#define SE_DB_CLEAN (1 << 1)
#define SE_CB_CLEAN (1 << 2)
#define SE_BCI_BUSY (1 << 22)
#define SE_VGT_BUSY (1 << 23)
#define SE_PA_BUSY (1 << 24)
#define SE_TA_BUSY (1 << 25)
#define SE_SX_BUSY (1 << 26)
#define SE_SPI_BUSY (1 << 27)
#define SE_SC_BUSY (1 << 29)
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
 
#define GRBM_SOFT_RESET 0x8020
#define SOFT_RESET_CP (1 << 0)
#define SOFT_RESET_CB (1 << 1)
#define SOFT_RESET_RLC (1 << 2)
#define SOFT_RESET_DB (1 << 3)
#define SOFT_RESET_GDS (1 << 4)
#define SOFT_RESET_PA (1 << 5)
#define SOFT_RESET_SC (1 << 6)
#define SOFT_RESET_BCI (1 << 7)
#define SOFT_RESET_SPI (1 << 8)
#define SOFT_RESET_SX (1 << 10)
#define SOFT_RESET_TC (1 << 11)
#define SOFT_RESET_TA (1 << 12)
#define SOFT_RESET_VGT (1 << 14)
#define SOFT_RESET_IA (1 << 15)
 
#define GRBM_GFX_INDEX 0x802C
#define INSTANCE_INDEX(x) ((x) << 0)
#define SH_INDEX(x) ((x) << 8)
#define SE_INDEX(x) ((x) << 16)
#define SH_BROADCAST_WRITES (1 << 29)
#define INSTANCE_BROADCAST_WRITES (1 << 30)
#define SE_BROADCAST_WRITES (1 << 31)
 
#define GRBM_INT_CNTL 0x8060
# define RDERR_INT_ENABLE (1 << 0)
# define GUI_IDLE_INT_ENABLE (1 << 19)
 
#define SCRATCH_REG0 0x8500
#define SCRATCH_REG1 0x8504
#define SCRATCH_REG2 0x8508
#define SCRATCH_REG3 0x850C
#define SCRATCH_REG4 0x8510
#define SCRATCH_REG5 0x8514
#define SCRATCH_REG6 0x8518
#define SCRATCH_REG7 0x851C
 
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
 
#define CP_SEM_WAIT_TIMER 0x85BC
 
#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
 
#define CP_ME_CNTL 0x86D8
#define CP_CE_HALT (1 << 24)
#define CP_PFP_HALT (1 << 26)
#define CP_ME_HALT (1 << 28)
 
#define CP_COHER_CNTL2 0x85E8
 
#define CP_RB2_RPTR 0x86f8
#define CP_RB1_RPTR 0x86fc
#define CP_RB0_RPTR 0x8700
#define CP_RB_WPTR_DELAY 0x8704
 
#define CP_QUEUE_THRESHOLDS 0x8760
#define ROQ_IB1_START(x) ((x) << 0)
#define ROQ_IB2_START(x) ((x) << 8)
#define CP_MEQ_THRESHOLDS 0x8764
#define MEQ1_START(x) ((x) << 0)
#define MEQ2_START(x) ((x) << 8)
 
#define CP_PERFMON_CNTL 0x87FC
 
#define VGT_VTX_VECT_EJECT_REG 0x88B0
 
#define VGT_CACHE_INVALIDATION 0x88C4
#define CACHE_INVALIDATION(x) ((x) << 0)
#define VC_ONLY 0
#define TC_ONLY 1
#define VC_AND_TC 2
#define AUTO_INVLD_EN(x) ((x) << 6)
#define NO_AUTO 0
#define ES_AUTO 1
#define GS_AUTO 2
#define ES_AND_GS_AUTO 3
#define VGT_ESGS_RING_SIZE 0x88C8
#define VGT_GSVS_RING_SIZE 0x88CC
 
#define VGT_GS_VERTEX_REUSE 0x88D4
 
#define VGT_PRIMITIVE_TYPE 0x8958
#define VGT_INDEX_TYPE 0x895C
 
#define VGT_NUM_INDICES 0x8970
#define VGT_NUM_INSTANCES 0x8974
 
#define VGT_TF_RING_SIZE 0x8988
 
#define VGT_HS_OFFCHIP_PARAM 0x89B0
 
#define VGT_TF_MEMORY_BASE 0x89B8
 
#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
#define INACTIVE_CUS_MASK 0xFFFF0000
#define INACTIVE_CUS_SHIFT 16
#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
 
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
 
#define PA_SU_LINE_STIPPLE_VALUE 0x8A60
 
#define PA_SC_LINE_STIPPLE_STATE 0x8B10
 
#define PA_SC_FORCE_EOV_MAX_CNTS 0x8B24
#define FORCE_EOV_MAX_CLK_CNT(x) ((x) << 0)
#define FORCE_EOV_MAX_REZ_CNT(x) ((x) << 16)
 
#define PA_SC_FIFO_SIZE 0x8BCC
#define SC_FRONTEND_PRIM_FIFO_SIZE(x) ((x) << 0)
#define SC_BACKEND_PRIM_FIFO_SIZE(x) ((x) << 6)
#define SC_HIZ_TILE_FIFO_SIZE(x) ((x) << 15)
#define SC_EARLYZ_TILE_FIFO_SIZE(x) ((x) << 23)
 
#define PA_SC_ENHANCE 0x8BF0
 
#define SQ_CONFIG 0x8C00
 
#define SQC_CACHES 0x8C08
 
#define SX_DEBUG_1 0x9060
 
#define SPI_STATIC_THREAD_MGMT_1 0x90E0
#define SPI_STATIC_THREAD_MGMT_2 0x90E4
#define SPI_STATIC_THREAD_MGMT_3 0x90E8
#define SPI_PS_MAX_WAVE_ID 0x90EC
 
#define SPI_CONFIG_CNTL 0x9100
 
#define SPI_CONFIG_CNTL_1 0x913C
#define VTX_DONE_DELAY(x) ((x) << 0)
#define INTERP_ONE_PRIM_PER_ROW (1 << 4)
 
#define CGTS_TCC_DISABLE 0x9148
#define CGTS_USER_TCC_DISABLE 0x914C
#define TCC_DISABLE_MASK 0xFFFF0000
#define TCC_DISABLE_SHIFT 16
 
#define TA_CNTL_AUX 0x9508
 
#define CC_RB_BACKEND_DISABLE 0x98F4
#define BACKEND_DISABLE(x) ((x) << 16)
#define GB_ADDR_CONFIG 0x98F8
#define NUM_PIPES(x) ((x) << 0)
#define NUM_PIPES_MASK 0x00000007
#define NUM_PIPES_SHIFT 0
#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
#define PIPE_INTERLEAVE_SIZE_MASK 0x00000070
#define PIPE_INTERLEAVE_SIZE_SHIFT 4
#define NUM_SHADER_ENGINES(x) ((x) << 12)
#define NUM_SHADER_ENGINES_MASK 0x00003000
#define NUM_SHADER_ENGINES_SHIFT 12
#define SHADER_ENGINE_TILE_SIZE(x) ((x) << 16)
#define SHADER_ENGINE_TILE_SIZE_MASK 0x00070000
#define SHADER_ENGINE_TILE_SIZE_SHIFT 16
#define NUM_GPUS(x) ((x) << 20)
#define NUM_GPUS_MASK 0x00700000
#define NUM_GPUS_SHIFT 20
#define MULTI_GPU_TILE_SIZE(x) ((x) << 24)
#define MULTI_GPU_TILE_SIZE_MASK 0x03000000
#define MULTI_GPU_TILE_SIZE_SHIFT 24
#define ROW_SIZE(x) ((x) << 28)
#define ROW_SIZE_MASK 0x30000000
#define ROW_SIZE_SHIFT 28
 
#define GB_TILE_MODE0 0x9910
# define MICRO_TILE_MODE(x) ((x) << 0)
# define ADDR_SURF_DISPLAY_MICRO_TILING 0
# define ADDR_SURF_THIN_MICRO_TILING 1
# define ADDR_SURF_DEPTH_MICRO_TILING 2
# define ARRAY_MODE(x) ((x) << 2)
# define ARRAY_LINEAR_GENERAL 0
# define ARRAY_LINEAR_ALIGNED 1
# define ARRAY_1D_TILED_THIN1 2
# define ARRAY_2D_TILED_THIN1 4
# define PIPE_CONFIG(x) ((x) << 6)
# define ADDR_SURF_P2 0
# define ADDR_SURF_P4_8x16 4
# define ADDR_SURF_P4_16x16 5
# define ADDR_SURF_P4_16x32 6
# define ADDR_SURF_P4_32x32 7
# define ADDR_SURF_P8_16x16_8x16 8
# define ADDR_SURF_P8_16x32_8x16 9
# define ADDR_SURF_P8_32x32_8x16 10
# define ADDR_SURF_P8_16x32_16x16 11
# define ADDR_SURF_P8_32x32_16x16 12
# define ADDR_SURF_P8_32x32_16x32 13
# define ADDR_SURF_P8_32x64_32x32 14
# define TILE_SPLIT(x) ((x) << 11)
# define ADDR_SURF_TILE_SPLIT_64B 0
# define ADDR_SURF_TILE_SPLIT_128B 1
# define ADDR_SURF_TILE_SPLIT_256B 2
# define ADDR_SURF_TILE_SPLIT_512B 3
# define ADDR_SURF_TILE_SPLIT_1KB 4
# define ADDR_SURF_TILE_SPLIT_2KB 5
# define ADDR_SURF_TILE_SPLIT_4KB 6
# define BANK_WIDTH(x) ((x) << 14)
# define ADDR_SURF_BANK_WIDTH_1 0
# define ADDR_SURF_BANK_WIDTH_2 1
# define ADDR_SURF_BANK_WIDTH_4 2
# define ADDR_SURF_BANK_WIDTH_8 3
# define BANK_HEIGHT(x) ((x) << 16)
# define ADDR_SURF_BANK_HEIGHT_1 0
# define ADDR_SURF_BANK_HEIGHT_2 1
# define ADDR_SURF_BANK_HEIGHT_4 2
# define ADDR_SURF_BANK_HEIGHT_8 3
# define MACRO_TILE_ASPECT(x) ((x) << 18)
# define ADDR_SURF_MACRO_ASPECT_1 0
# define ADDR_SURF_MACRO_ASPECT_2 1
# define ADDR_SURF_MACRO_ASPECT_4 2
# define ADDR_SURF_MACRO_ASPECT_8 3
# define NUM_BANKS(x) ((x) << 20)
# define ADDR_SURF_2_BANK 0
# define ADDR_SURF_4_BANK 1
# define ADDR_SURF_8_BANK 2
# define ADDR_SURF_16_BANK 3
 
#define CB_PERFCOUNTER0_SELECT0 0x9a20
#define CB_PERFCOUNTER0_SELECT1 0x9a24
#define CB_PERFCOUNTER1_SELECT0 0x9a28
#define CB_PERFCOUNTER1_SELECT1 0x9a2c
#define CB_PERFCOUNTER2_SELECT0 0x9a30
#define CB_PERFCOUNTER2_SELECT1 0x9a34
#define CB_PERFCOUNTER3_SELECT0 0x9a38
#define CB_PERFCOUNTER3_SELECT1 0x9a3c
 
#define GC_USER_RB_BACKEND_DISABLE 0x9B7C
#define BACKEND_DISABLE_MASK 0x00FF0000
#define BACKEND_DISABLE_SHIFT 16
 
#define TCP_CHAN_STEER_LO 0xac0c
#define TCP_CHAN_STEER_HI 0xac10
 
#define CP_RB0_BASE 0xC100
#define CP_RB0_CNTL 0xC104
#define RB_BUFSZ(x) ((x) << 0)
#define RB_BLKSZ(x) ((x) << 8)
#define BUF_SWAP_32BIT (2 << 16)
#define RB_NO_UPDATE (1 << 27)
#define RB_RPTR_WR_ENA (1 << 31)
 
#define CP_RB0_RPTR_ADDR 0xC10C
#define CP_RB0_RPTR_ADDR_HI 0xC110
#define CP_RB0_WPTR 0xC114
 
#define CP_PFP_UCODE_ADDR 0xC150
#define CP_PFP_UCODE_DATA 0xC154
#define CP_ME_RAM_RADDR 0xC158
#define CP_ME_RAM_WADDR 0xC15C
#define CP_ME_RAM_DATA 0xC160
 
#define CP_CE_UCODE_ADDR 0xC168
#define CP_CE_UCODE_DATA 0xC16C
 
#define CP_RB1_BASE 0xC180
#define CP_RB1_CNTL 0xC184
#define CP_RB1_RPTR_ADDR 0xC188
#define CP_RB1_RPTR_ADDR_HI 0xC18C
#define CP_RB1_WPTR 0xC190
#define CP_RB2_BASE 0xC194
#define CP_RB2_CNTL 0xC198
#define CP_RB2_RPTR_ADDR 0xC19C
#define CP_RB2_RPTR_ADDR_HI 0xC1A0
#define CP_RB2_WPTR 0xC1A4
#define CP_INT_CNTL_RING0 0xC1A8
#define CP_INT_CNTL_RING1 0xC1AC
#define CP_INT_CNTL_RING2 0xC1B0
# define CNTX_BUSY_INT_ENABLE (1 << 19)
# define CNTX_EMPTY_INT_ENABLE (1 << 20)
# define WAIT_MEM_SEM_INT_ENABLE (1 << 21)
# define TIME_STAMP_INT_ENABLE (1 << 26)
# define CP_RINGID2_INT_ENABLE (1 << 29)
# define CP_RINGID1_INT_ENABLE (1 << 30)
# define CP_RINGID0_INT_ENABLE (1 << 31)
#define CP_INT_STATUS_RING0 0xC1B4
#define CP_INT_STATUS_RING1 0xC1B8
#define CP_INT_STATUS_RING2 0xC1BC
# define WAIT_MEM_SEM_INT_STAT (1 << 21)
# define TIME_STAMP_INT_STAT (1 << 26)
# define CP_RINGID2_INT_STAT (1 << 29)
# define CP_RINGID1_INT_STAT (1 << 30)
# define CP_RINGID0_INT_STAT (1 << 31)
 
#define CP_DEBUG 0xC1FC
 
#define RLC_CNTL 0xC300
# define RLC_ENABLE (1 << 0)
#define RLC_RL_BASE 0xC304
#define RLC_RL_SIZE 0xC308
#define RLC_LB_CNTL 0xC30C
#define RLC_SAVE_AND_RESTORE_BASE 0xC310
#define RLC_LB_CNTR_MAX 0xC314
#define RLC_LB_CNTR_INIT 0xC318
 
#define RLC_CLEAR_STATE_RESTORE_BASE 0xC320
 
#define RLC_UCODE_ADDR 0xC32C
#define RLC_UCODE_DATA 0xC330
 
#define RLC_GPU_CLOCK_COUNT_LSB 0xC338
#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C
#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340
#define RLC_MC_CNTL 0xC344
#define RLC_UCODE_CNTL 0xC348
 
#define PA_SC_RASTER_CONFIG 0x28350
# define RASTER_CONFIG_RB_MAP_0 0
# define RASTER_CONFIG_RB_MAP_1 1
# define RASTER_CONFIG_RB_MAP_2 2
# define RASTER_CONFIG_RB_MAP_3 3
 
#define VGT_EVENT_INITIATOR 0x28a90
# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
# define SAMPLE_STREAMOUTSTATS3 (3 << 0)
# define CACHE_FLUSH_TS (4 << 0)
# define CACHE_FLUSH (6 << 0)
# define CS_PARTIAL_FLUSH (7 << 0)
# define VGT_STREAMOUT_RESET (10 << 0)
# define END_OF_PIPE_INCR_DE (11 << 0)
# define END_OF_PIPE_IB_END (12 << 0)
# define RST_PIX_CNT (13 << 0)
# define VS_PARTIAL_FLUSH (15 << 0)
# define PS_PARTIAL_FLUSH (16 << 0)
# define CACHE_FLUSH_AND_INV_TS_EVENT (20 << 0)
# define ZPASS_DONE (21 << 0)
# define CACHE_FLUSH_AND_INV_EVENT (22 << 0)
# define PERFCOUNTER_START (23 << 0)
# define PERFCOUNTER_STOP (24 << 0)
# define PIPELINESTAT_START (25 << 0)
# define PIPELINESTAT_STOP (26 << 0)
# define PERFCOUNTER_SAMPLE (27 << 0)
# define SAMPLE_PIPELINESTAT (30 << 0)
# define SAMPLE_STREAMOUTSTATS (32 << 0)
# define RESET_VTX_CNT (33 << 0)
# define VGT_FLUSH (36 << 0)
# define BOTTOM_OF_PIPE_TS (40 << 0)
# define DB_CACHE_FLUSH_AND_INV (42 << 0)
# define FLUSH_AND_INV_DB_DATA_TS (43 << 0)
# define FLUSH_AND_INV_DB_META (44 << 0)
# define FLUSH_AND_INV_CB_DATA_TS (45 << 0)
# define FLUSH_AND_INV_CB_META (46 << 0)
# define CS_DONE (47 << 0)
# define PS_DONE (48 << 0)
# define FLUSH_AND_INV_CB_PIXEL_DATA (49 << 0)
# define THREAD_TRACE_START (51 << 0)
# define THREAD_TRACE_STOP (52 << 0)
# define THREAD_TRACE_FLUSH (54 << 0)
# define THREAD_TRACE_FINISH (55 << 0)
 
/*
* PM4
*/
#define PACKET_TYPE0 0
#define PACKET_TYPE1 1
#define PACKET_TYPE2 2
#define PACKET_TYPE3 3
 
#define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
#define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
#define CP_PACKET0_GET_REG(h) (((h) & 0xFFFF) << 2)
#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
#define PACKET0(reg, n) ((PACKET_TYPE0 << 30) | \
(((reg) >> 2) & 0xFFFF) | \
((n) & 0x3FFF) << 16)
#define CP_PACKET2 0x80000000
#define PACKET2_PAD_SHIFT 0
#define PACKET2_PAD_MASK (0x3fffffff << 0)
 
#define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
 
#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
(((op) & 0xFF) << 8) | \
((n) & 0x3FFF) << 16)
 
#define PACKET3_COMPUTE(op, n) (PACKET3(op, n) | 1 << 1)
 
/* Packet 3 types */
#define PACKET3_NOP 0x10
#define PACKET3_SET_BASE 0x11
#define PACKET3_BASE_INDEX(x) ((x) << 0)
#define GDS_PARTITION_BASE 2
#define CE_PARTITION_BASE 3
#define PACKET3_CLEAR_STATE 0x12
#define PACKET3_INDEX_BUFFER_SIZE 0x13
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_ALLOC_GDS 0x1B
#define PACKET3_WRITE_GDS_RAM 0x1C
#define PACKET3_ATOMIC_GDS 0x1D
#define PACKET3_ATOMIC 0x1E
#define PACKET3_OCCLUSION_QUERY 0x1F
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
#define PACKET3_PRED_EXEC 0x23
#define PACKET3_DRAW_INDIRECT 0x24
#define PACKET3_DRAW_INDEX_INDIRECT 0x25
#define PACKET3_INDEX_BASE 0x26
#define PACKET3_DRAW_INDEX_2 0x27
#define PACKET3_CONTEXT_CONTROL 0x28
#define PACKET3_INDEX_TYPE 0x2A
#define PACKET3_DRAW_INDIRECT_MULTI 0x2C
#define PACKET3_DRAW_INDEX_AUTO 0x2D
#define PACKET3_DRAW_INDEX_IMMD 0x2E
#define PACKET3_NUM_INSTANCES 0x2F
#define PACKET3_DRAW_INDEX_MULTI_AUTO 0x30
#define PACKET3_INDIRECT_BUFFER_CONST 0x31
#define PACKET3_INDIRECT_BUFFER 0x32
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_DRAW_INDEX_OFFSET_2 0x35
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_WRITE_DATA 0x37
#define WRITE_DATA_DST_SEL(x) ((x) << 8)
/* 0 - register
* 1 - memory (sync - via GRBM)
* 2 - tc/l2
* 3 - gds
* 4 - reserved
* 5 - memory (async - direct)
*/
#define WR_ONE_ADDR (1 << 16)
#define WR_CONFIRM (1 << 20)
#define WRITE_DATA_ENGINE_SEL(x) ((x) << 30)
/* 0 - me
* 1 - pfp
* 2 - ce
*/
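/* Emit sketch (wrapper not in the original source): a minimal PM4
 * WRITE_DATA packet storing one dword to GPU memory from the ME.
 * Assumes the usual radeon_ring_write() helper and a ring with space
 * already reserved.
 */
static void si_emit_write_data_example(struct radeon_ring *ring,
				       u64 gpu_addr, u32 value)
{
	radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	radeon_ring_write(ring, WRITE_DATA_DST_SEL(1) |	/* memory, via GRBM */
				WR_CONFIRM);
	radeon_ring_write(ring, lower_32_bits(gpu_addr));
	radeon_ring_write(ring, upper_32_bits(gpu_addr));
	radeon_ring_write(ring, value);
}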
#define PACKET3_DRAW_INDEX_INDIRECT_MULTI 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
# define PACKET3_DEST_BASE_0_ENA (1 << 0)
# define PACKET3_DEST_BASE_1_ENA (1 << 1)
# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
# define PACKET3_CB2_DEST_BASE_ENA (1 << 8)
# define PACKET3_CB3_DEST_BASE_ENA (1 << 9)
# define PACKET3_CB4_DEST_BASE_ENA (1 << 10)
# define PACKET3_CB5_DEST_BASE_ENA (1 << 11)
# define PACKET3_CB6_DEST_BASE_ENA (1 << 12)
# define PACKET3_CB7_DEST_BASE_ENA (1 << 13)
# define PACKET3_DB_DEST_BASE_ENA (1 << 14)
# define PACKET3_DEST_BASE_2_ENA (1 << 19)
# define PACKET3_DEST_BASE_3_ENA (1 << 21)
# define PACKET3_TCL1_ACTION_ENA (1 << 22)
# define PACKET3_TC_ACTION_ENA (1 << 23)
# define PACKET3_CB_ACTION_ENA (1 << 25)
# define PACKET3_DB_ACTION_ENA (1 << 26)
# define PACKET3_SH_KCACHE_ACTION_ENA (1 << 27)
# define PACKET3_SH_ICACHE_ACTION_ENA (1 << 29)
#define PACKET3_ME_INITIALIZE 0x44
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
#define EVENT_TYPE(x) ((x) << 0)
#define EVENT_INDEX(x) ((x) << 8)
/* 0 - any non-TS event
* 1 - ZPASS_DONE
* 2 - SAMPLE_PIPELINESTAT
* 3 - SAMPLE_STREAMOUTSTAT*
* 4 - *S_PARTIAL_FLUSH
* 5 - EOP events
* 6 - EOS events
* 7 - CACHE_FLUSH, CACHE_FLUSH_AND_INV_EVENT
*/
#define INV_L2 (1 << 20)
/* INV TC L2 cache when EVENT_INDEX = 7 */
#define PACKET3_EVENT_WRITE_EOP 0x47
#define DATA_SEL(x) ((x) << 29)
/* 0 - discard
* 1 - send low 32bit data
* 2 - send 64bit data
* 3 - send 64bit counter value
*/
#define INT_SEL(x) ((x) << 24)
/* 0 - none
* 1 - interrupt only (DATA_SEL = 0)
* 2 - interrupt when data write is confirmed
*/
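/* Fence sketch (wrapper name illustrative; the packet layout mirrors
 * the driver's end-of-pipe fence): write a 32-bit sequence number at
 * EOP and raise an interrupt once the write is confirmed.
 */
static void si_emit_eop_fence_example(struct radeon_ring *ring,
				      u64 addr, u32 seq)
{
	radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				EVENT_INDEX(5));	/* EOP event */
	radeon_ring_write(ring, addr & 0xffffffff);
	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) |
				DATA_SEL(1) |	/* send low 32-bit data */
				INT_SEL(2));	/* irq when write confirmed */
	radeon_ring_write(ring, seq);
	radeon_ring_write(ring, 0);
}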
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
# define PACKET3_PREAMBLE_END_CLEAR_STATE (3 << 28)
#define PACKET3_ONE_REG_WRITE 0x57
#define PACKET3_LOAD_CONFIG_REG 0x5F
#define PACKET3_LOAD_CONTEXT_REG 0x60
#define PACKET3_LOAD_SH_REG 0x61
#define PACKET3_SET_CONFIG_REG 0x68
#define PACKET3_SET_CONFIG_REG_START 0x00008000
#define PACKET3_SET_CONFIG_REG_END 0x0000b000
#define PACKET3_SET_CONTEXT_REG 0x69
#define PACKET3_SET_CONTEXT_REG_START 0x00028000
#define PACKET3_SET_CONTEXT_REG_END 0x00029000
#define PACKET3_SET_CONTEXT_REG_INDIRECT 0x73
#define PACKET3_SET_RESOURCE_INDIRECT 0x74
#define PACKET3_SET_SH_REG 0x76
#define PACKET3_SET_SH_REG_START 0x0000b000
#define PACKET3_SET_SH_REG_END 0x0000c000
#define PACKET3_SET_SH_REG_OFFSET 0x77
#define PACKET3_ME_WRITE 0x7A
#define PACKET3_SCRATCH_RAM_WRITE 0x7D
#define PACKET3_SCRATCH_RAM_READ 0x7E
#define PACKET3_CE_WRITE 0x7F
#define PACKET3_LOAD_CONST_RAM 0x80
#define PACKET3_WRITE_CONST_RAM 0x81
#define PACKET3_WRITE_CONST_RAM_OFFSET 0x82
#define PACKET3_DUMP_CONST_RAM 0x83
#define PACKET3_INCREMENT_CE_COUNTER 0x84
#define PACKET3_INCREMENT_DE_COUNTER 0x85
#define PACKET3_WAIT_ON_CE_COUNTER 0x86
#define PACKET3_WAIT_ON_DE_COUNTER 0x87
#define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88
#define PACKET3_SET_CE_DE_COUNTERS 0x89
#define PACKET3_WAIT_ON_AVAIL_BUFFER 0x8A
#define PACKET3_SWITCH_BUFFER 0x8B
 
#endif