Subversion Repositories Kolibri OS


Diff: Rev 5271 → Rev 6104
Line 31... Line 31...
 #include
 #include
 #include
 #include "radeon.h"
 #include "radeon_asic.h"
+#include "radeon_audio.h"
 #include "radeon_mode.h"
 #include "r600d.h"
 #include "atom.h"
 #include "avivod.h"
 #include "radeon_ucode.h"

Line 105... Line 106...
 void r600_irq_disable(struct radeon_device *rdev);
 static void r600_pcie_gen2_enable(struct radeon_device *rdev);
 extern int evergreen_rlc_resume(struct radeon_device *rdev);
 extern void rv770_set_clk_bypass_mode(struct radeon_device *rdev);

Line -... Line 110...
+
+/*
+ * Indirect registers accessor
+ */
+u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
+	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
+	r = RREG32(R600_RCU_DATA);
+	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
+	return r;
+}
+
+void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
+	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));
+	WREG32(R600_RCU_DATA, (v));
+	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
+}
+
+u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
+{
+	unsigned long flags;
+	u32 r;
+
+	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
+	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
+	r = RREG32(R600_UVD_CTX_DATA);
+	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
+	return r;
+}
+
+void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
+	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));
+	WREG32(R600_UVD_CTX_DATA, (v));
+	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
+}
+
+/**
+ * r600_get_allowed_info_register - fetch the register for the info ioctl
+ *
+ * @rdev: radeon_device pointer
+ * @reg: register offset in bytes
+ * @val: register value
+ *
+ * Returns 0 for success or -EINVAL for an invalid register
+ *
+ */
+int r600_get_allowed_info_register(struct radeon_device *rdev,
+				   u32 reg, u32 *val)
+{
+	switch (reg) {
+	case GRBM_STATUS:
+	case GRBM_STATUS2:
+	case R_000E50_SRBM_STATUS:
+	case DMA_STATUS_REG:
+	case UVD_STATUS:
+		*val = RREG32(reg);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}

 /**
  * r600_get_xclk - get the xclk
  *
  * @rdev: radeon_device pointer
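The accessors added in this hunk all follow the same indexed (INDEX/DATA) scheme: take a lock, write the register number into an index register, then read or write a single data register. The lock matters because the index write and the data access are two separate bus operations that must not be interleaved by another caller. A self-contained user-space sketch of the idea, with a plain mutex standing in for spin_lock_irqsave() and an array standing in for the indirect register block (all names below are illustrative, not the driver's):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated hardware: only INDEX and DATA would be visible through MMIO
 * on a real chip; the array is the register file they expose. */
static uint32_t indirect_regs[0x2000];
static uint32_t index_reg;

static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t indirect_rreg(uint32_t reg)
{
	uint32_t r;

	pthread_mutex_lock(&idx_lock);   /* driver uses spin_lock_irqsave() */
	index_reg = reg & 0x1fff;        /* like WREG32(R600_RCU_INDEX, ...) */
	r = indirect_regs[index_reg];    /* like r = RREG32(R600_RCU_DATA)   */
	pthread_mutex_unlock(&idx_lock);
	return r;
}

static void indirect_wreg(uint32_t reg, uint32_t v)
{
	pthread_mutex_lock(&idx_lock);
	index_reg = reg & 0x1fff;
	indirect_regs[index_reg] = v;    /* like WREG32(R600_RCU_DATA, v) */
	pthread_mutex_unlock(&idx_lock);
}

int main(void)
{
	indirect_wreg(0x42, 0xdeadbeef);
	printf("0x42 = 0x%08x\n", indirect_rreg(0x42));
	return 0;
}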
Line 2994... Line 3068...
 	if (r) {
 		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
 		return r;
 	}
+
+	if (rdev->has_uvd) {
+		r = uvd_v1_0_resume(rdev);
+		if (!r) {
+			r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+			if (r) {
+				dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+			}
+		}
+		if (r)
+			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+	}

 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
 		r = radeon_irq_kms_init(rdev);
 		if (r)
 			return r;
 	}

 	r = r600_irq_init(rdev);
 	if (r) {
 		DRM_ERROR("radeon: IH init failed (%d).\n", r);
-//		radeon_irq_kms_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		return r;
 	}
Line 3022... Line 3108...
 		return r;
 	r = r600_cp_resume(rdev);
 	if (r)
 		return r;
+
+	if (rdev->has_uvd) {
+		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+		if (ring->ring_size) {
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+					     RADEON_CP_PACKET2);
+			if (!r)
+				r = uvd_v1_0_init(rdev);
+			if (r)
+				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+		}
+	}

 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 		return r;
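The UVD bring-up added above is deliberately non-fatal: if the optional engine fails to come up, the driver logs the error, zeroes the ring size, and carries on with graphics. A minimal sketch of that pattern, assuming hypothetical engine_resume()/engine_start() helpers (none of these names come from the driver):

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical optional engine state; stands in for the UVD ring. */
struct engine {
	bool present;        /* like rdev->has_uvd          */
	unsigned ring_size;  /* 0 means "do not use later"  */
};

/* Stubs that pretend to touch hardware; either may fail. */
static int engine_resume(struct engine *e) { (void)e; return 0; }
static int engine_start(struct engine *e)  { (void)e; return -5; /* simulate failure */ }

/* Optional bring-up: failure is logged and disables the engine,
 * but never aborts device initialization as a whole. */
static void optional_bringup(struct engine *e)
{
	int r;

	if (!e->present)
		return;

	r = engine_resume(e);
	if (!r)
		r = engine_start(e);
	if (r) {
		fprintf(stderr, "optional engine failed (%d), continuing without it\n", r);
		e->ring_size = 0;  /* later code checks this and skips the engine */
	}
}

int main(void)
{
	struct engine uvd = { .present = true, .ring_size = 4096 };

	optional_bringup(&uvd);
	printf("engine usable: %s\n", uvd.ring_size ? "yes" : "no");
	return 0;
}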
Line 3122... Line 3220...
 	radeon_pm_init(rdev);

 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
+
+	if (rdev->has_uvd) {
+		r = radeon_uvd_init(rdev);
+		if (!r) {
+			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+			r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+		}
+	}

 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
Line 3133... Line 3239...

 	rdev->accel_working = true;
 	r = r600_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
+		r600_cp_fini(rdev);
+		r600_irq_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		r600_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}

 	return 0;
 }
+
+void r600_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	radeon_audio_fini(rdev);
+	r600_cp_fini(rdev);
+	r600_irq_fini(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_fini(rdev);
+	}
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	r600_pcie_gart_fini(rdev);
+	r600_vram_scratch_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_gem_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
+

 /*
  * CS stuff
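The r600_fini added above, and the expanded failure path in r600_init, both tear subsystems down roughly in the reverse of the order they were brought up, so that every fini call still sees valid state in whatever it depends on. A small self-contained sketch of that discipline, using hypothetical subsystem names rather than the driver's real ones:

#include <stdio.h>

/* Hypothetical subsystems, listed in the order they are initialized. */
static void fence_fini(void) { puts("fence_fini"); }
static void ring_fini(void)  { puts("ring_fini");  }
static void irq_fini(void)   { puts("irq_fini");   }
static void gart_fini(void)  { puts("gart_fini");  }

/* Teardown table in initialization order; it is walked from the end so
 * that each subsystem is destroyed before anything it depends on. */
static void (*const fini_table[])(void) = {
	fence_fini,  /* initialized first -> destroyed last  */
	ring_fini,
	irq_fini,
	gart_fini,   /* initialized last  -> destroyed first */
};

int main(void)
{
	/* Walk the table backwards: reverse of initialization order. */
	for (int i = (int)(sizeof(fini_table) / sizeof(fini_table[0])) - 1; i >= 0; i--)
		fini_table[i]();
	return 0;
}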
Line 3519... Line 3655...
 	/* enable irqs */
 	r600_enable_interrupts(rdev);

 	return ret;
 }
+
+void r600_irq_suspend(struct radeon_device *rdev)
+{
+	r600_irq_disable(rdev);
+	r600_rlc_stop(rdev);
+}
+
+void r600_irq_fini(struct radeon_device *rdev)
+{
+	r600_irq_suspend(rdev);
+	r600_ih_ring_fini(rdev);
+}
+
 int r600_irq_set(struct radeon_device *rdev)
 {
 	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
 	u32 mode_int = 0;
Line 3664... Line 3813...
 		WREG32(CG_THERMAL_INT, thermal_int);
 	} else if (rdev->family >= CHIP_RV770) {
 		WREG32(RV770_CG_THERMAL_INT, thermal_int);
 	}
+
+	/* posting read */
+	RREG32(R_000E50_SRBM_STATUS);

 	return 0;
 }
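The "posting read" added at the end of r600_irq_set is the usual trick for flushing posted MMIO writes: a read over the same path cannot complete until the preceding writes have actually reached the device, so the interrupt configuration is guaranteed to be in effect when the function returns. A minimal sketch of the idea, assuming a hypothetical mmio_write32()/mmio_read32() pair rather than the driver's WREG32/RREG32 macros:

#include <stdint.h>
#include <stdio.h>

/* Simulated register file; on real hardware this would be an ioremap'd
 * MMIO window and the accessors would be readl()/writel()-style helpers. */
static volatile uint32_t regs[256];

static void mmio_write32(uint32_t offset, uint32_t val) { regs[offset / 4] = val; }
static uint32_t mmio_read32(uint32_t offset)            { return regs[offset / 4]; }

#define INT_ENABLE_REG 0x40  /* hypothetical interrupt-enable register */
#define STATUS_REG     0x50  /* hypothetical status register           */

static void enable_interrupts(void)
{
	mmio_write32(INT_ENABLE_REG, 0x1);

	/* Posting read: MMIO writes may be buffered ("posted") by the bus;
	 * a read on the same path forces them to land before we return. */
	(void)mmio_read32(STATUS_REG);
}

int main(void)
{
	enable_interrupts();
	printf("int enable = 0x%x\n", mmio_read32(INT_ENABLE_REG));
	return 0;
}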
Line 3846... Line 3998...
  *    233         -  GUI Idle
  *
  * Note, these are based on r600 and may need to be
  * adjusted or added to on newer asics
  */
-#undef  DRM_DEBUG
-#define DRM_DEBUG(...)

 int r600_irq_process(struct radeon_device *rdev)
 {
 	u32 wptr;
Line 3892... Line 4042...

 		switch (src_id) {
 		case 1: /* D1 vblank/vline */
 			switch (src_data) {
 			case 0: /* D1 vblank */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[0]) {
-//                       drm_handle_vblank(rdev->ddev, 0);
-						rdev->pm.vblank_sync = true;
-//                       wake_up(&rdev->irq.vblank_queue);
-					}
-//                   if (rdev->irq.pflip[0])
-//                       radeon_crtc_handle_flip(rdev, 0);
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D1 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D1 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[0]) {
+					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[0]))
+					radeon_crtc_handle_vblank(rdev, 0);
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D1 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D1 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D1_VLINE_INTERRUPT))
+				    DRM_DEBUG("IH: D1 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D1_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D1 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
 			}
 			break;
 		case 5: /* D2 vblank/vline */
 			switch (src_data) {
 			case 0: /* D2 vblank */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT) {
-					if (rdev->irq.crtc_vblank_int[1]) {
-//                       drm_handle_vblank(rdev->ddev, 1);
-						rdev->pm.vblank_sync = true;
-//                       wake_up(&rdev->irq.vblank_queue);
-					}
-//                   if (rdev->irq.pflip[1])
-//                       radeon_crtc_handle_flip(rdev, 1);
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
-					DRM_DEBUG("IH: D2 vblank\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VBLANK_INTERRUPT))
+					DRM_DEBUG("IH: D2 vblank - IH event w/o asserted irq bit?\n");
+
+				if (rdev->irq.crtc_vblank_int[1]) {
+					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
+					wake_up(&rdev->irq.vblank_queue);
+				}
+				if (atomic_read(&rdev->irq.pflip[1]))
+					radeon_crtc_handle_vblank(rdev, 1);
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VBLANK_INTERRUPT;
+				DRM_DEBUG("IH: D2 vblank\n");
+
 				break;
 			case 1: /* D1 vline */
-				if (rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
-					DRM_DEBUG("IH: D2 vline\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & LB_D2_VLINE_INTERRUPT))
+					DRM_DEBUG("IH: D2 vline - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~LB_D2_VLINE_INTERRUPT;
+				DRM_DEBUG("IH: D2 vline\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
 			}
+			break;
+		case 9: /* D1 pflip */
+			DRM_DEBUG("IH: D1 flip\n");
+			break;
+		case 11: /* D2 pflip */
+			DRM_DEBUG("IH: D2 flip\n");
 			break;
 		case 19: /* HPD/DAC hotplug */
 			switch (src_data) {
 			case 0:
-				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD1\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD1_INTERRUPT))
+					DRM_DEBUG("IH: HPD1 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD1_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD1\n");
 				break;
 			case 1:
-				if (rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD2\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int & DC_HPD2_INTERRUPT))
+					DRM_DEBUG("IH: HPD2 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int &= ~DC_HPD2_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD2\n");
 				break;
 			case 4:
-				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD3\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD3_INTERRUPT))
+					DRM_DEBUG("IH: HPD3 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD3_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD3\n");
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD4\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont & DC_HPD4_INTERRUPT))
+					DRM_DEBUG("IH: HPD4 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont &= ~DC_HPD4_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD4\n");
 				break;
 			case 10:
-				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD5\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD5_INTERRUPT))
+					DRM_DEBUG("IH: HPD5 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD5\n");
 				break;
 			case 12:
-				if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
-					rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
-					queue_hotplug = true;
-					DRM_DEBUG("IH: HPD6\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT))
+					DRM_DEBUG("IH: HPD6 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
+				queue_hotplug = true;
+				DRM_DEBUG("IH: HPD6\n");
+
 				break;
 			default:
 				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
 			}
 			break;
 		case 21: /* hdmi */
 			switch (src_data) {
 			case 4:
-				if (rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI0\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.hdmi0_status & HDMI0_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: HDMI0 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.hdmi0_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI0\n");
+
 				break;
 			case 5:
-				if (rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG) {
-					rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
-					queue_hdmi = true;
-					DRM_DEBUG("IH: HDMI1\n");
-				}
+				if (!(rdev->irq.stat_regs.r600.hdmi1_status & HDMI0_AZ_FORMAT_WTRIG))
+					DRM_DEBUG("IH: HDMI1 - IH event w/o asserted irq bit?\n");
+
+				rdev->irq.stat_regs.r600.hdmi1_status &= ~HDMI0_AZ_FORMAT_WTRIG;
+				queue_hdmi = true;
+				DRM_DEBUG("IH: HDMI1\n");
+
 				break;
 			default:
 				DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data);
 				break;
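The r600_irq_process rewrite above replaces the old "only act if the status bit is set" nesting with the upstream pattern: warn when an IH event arrives without its status bit asserted, then handle the event unconditionally. That keeps each handler body at one indentation level and makes spurious events visible in the logs instead of silently dropping them. A compact sketch of the two shapes, with a made-up status word instead of the real disp_int registers:

#include <stdio.h>
#include <stdint.h>

#define VBLANK_BIT 0x1u  /* hypothetical status bit */

/* Old shape: the whole handler body lives inside the status check. */
static void handle_old(uint32_t *status)
{
	if (*status & VBLANK_BIT) {
		/* ...actual vblank work... */
		*status &= ~VBLANK_BIT;
		printf("old: handled vblank\n");
	}
}

/* New shape: log the suspicious case, then handle unconditionally.
 * The body stays flat, and spurious events still show up in logs. */
static void handle_new(uint32_t *status)
{
	if (!(*status & VBLANK_BIT))
		printf("new: IH event w/o asserted irq bit?\n");

	/* ...actual vblank work... */
	*status &= ~VBLANK_BIT;
	printf("new: handled vblank\n");
}

int main(void)
{
	uint32_t s1 = VBLANK_BIT, s2 = 0;

	handle_old(&s1);  /* runs the body            */
	handle_old(&s2);  /* silently does nothing    */
	handle_new(&s2);  /* warns, then still clears */
	return 0;
}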