Subversion Repositories: Kolibri OS

Rev 1129 → Rev 1179 (shown as a unified diff; lines prefixed "-" are Rev 1129 only, "+" are Rev 1179 only)
 /*
  * Copyright 2008 Advanced Micro Devices, Inc.
  * Copyright 2008 Red Hat Inc.
  * Copyright 2009 Jerome Glisse.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
  * to deal in the Software without restriction, including without limitation
  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  * and/or sell copies of the Software, and to permit persons to whom the
  * Software is furnished to do so, subject to the following conditions:
  *
  * The above copyright notice and this permission notice shall be included in
  * all copies or substantial portions of the Software.
  *
  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
-//#include <linux/seq_file.h>
+#include <linux/seq_file.h>
 #include "drmP.h"
 #include "drm.h"
 #include "radeon_drm.h"
 #include "radeon_microcode.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "r100d.h"
 
+#include "r100_reg_safe.h"
+#include "rn50_reg_safe.h"
 /* This files gather functions specifics to:
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  *
  * Some of these functions might be used by newer ASICs.
  */
+int r200_init(struct radeon_device *rdev);
 void r100_hdp_reset(struct radeon_device *rdev);
 void r100_gpu_init(struct radeon_device *rdev);
 int r100_gui_wait_for_idle(struct radeon_device *rdev);
 int r100_mc_wait_for_idle(struct radeon_device *rdev);
 void r100_gpu_wait_for_vsync(struct radeon_device *rdev);
 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev);
 int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+
 
 /*
  * PCI GART
  */
 void r100_pci_gart_tlb_flush(struct radeon_device *rdev)
 {
 	/* TODO: can we do somethings here ? */
 	/* It seems hw only cache one entry so we should discard this
 	 * entry otherwise if first GPU GART read hit this entry it
 	 * could end up in wrong address. */
 }
 
-int r100_pci_gart_enable(struct radeon_device *rdev)
+int r100_pci_gart_init(struct radeon_device *rdev)
 {
-	uint32_t tmp;
 	int r;
 
+	if (rdev->gart.table.ram.ptr) {
+		WARN(1, "R100 PCI GART already initialized.\n");
+		return 0;
+	}
 	/* Initialize common gart structure */
 	r = radeon_gart_init(rdev);
-	if (r) {
-		return r;
-	}
-	if (rdev->gart.table.ram.ptr == NULL) {
-		rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
-		r = radeon_gart_table_ram_alloc(rdev);
-		if (r) {
-			return r;
-		}
-	}
+	if (r)
+		return r;
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+	rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+	return radeon_gart_table_ram_alloc(rdev);
+}
+
+int r100_pci_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+
 	/* discard memory request outside of configured range */
 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
 	WREG32(RADEON_AIC_CNTL, tmp);
 	/* set address range for PCI address translate */
 	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
 	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
 	WREG32(RADEON_AIC_HI_ADDR, tmp);
 	/* Enable bus mastering */
 	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
 	WREG32(RADEON_BUS_CNTL, tmp);
 	/* set PCI GART page-table base address */
 	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
 	WREG32(RADEON_AIC_CNTL, tmp);
 	r100_pci_gart_tlb_flush(rdev);
 	rdev->gart.ready = true;
 	return 0;
 }
 
 void r100_pci_gart_disable(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 
 	/* discard memory request outside of configured range */
 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
 	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
 	WREG32(RADEON_AIC_LO_ADDR, 0);
 	WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
 int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
 {
 	if (i < 0 || i > rdev->gart.num_gpu_pages) {
 		return -EINVAL;
 	}
-	rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr);
+	rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr));
 	return 0;
 }
 
-int r100_gart_enable(struct radeon_device *rdev)
+void r100_pci_gart_fini(struct radeon_device *rdev)
 {
-	if (rdev->flags & RADEON_IS_AGP) {
 	r100_pci_gart_disable(rdev);
-		return 0;
-	}
-	return r100_pci_gart_enable(rdev);
+	radeon_gart_table_ram_free(rdev);
+	radeon_gart_fini(rdev);
 }
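
The rev 1179 restructuring above splits one-time setup out of r100_pci_gart_enable() into an init/fini pair that also hooks the ASIC's gart_tlb_flush/gart_set_page callbacks. Note that the bounds check in r100_pci_gart_set_page() still accepts i == num_gpu_pages, which looks like an off-by-one. A stand-alone sketch of the table-size arithmetic the init path relies on (hypothetical helper, not part of this file):

#include <stdint.h>

/* One GART entry is a little-endian 32-bit DWORD holding the low 32
 * bits of a page's bus address, hence "num_gpu_pages * 4" above.
 * Example: a 32 MiB aperture with 4 KiB pages needs 8192 entries,
 * i.e. a 32 KiB table. */
static uint32_t r100_gart_table_bytes(uint32_t aperture_bytes,
				      uint32_t page_bytes)
{
	return (aperture_bytes / page_bytes) * 4;
}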
 
 
 /*
  * MC
  */
 void r100_mc_disable_clients(struct radeon_device *rdev)
 {
 	uint32_t ov0_scale_cntl, crtc_ext_cntl, crtc_gen_cntl, crtc2_gen_cntl;
 
 	/* FIXME: is this function correct for rs100,rs200,rs300 ? */
 	if (r100_gui_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait GUI idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 
 	/* stop display and memory access */
 	ov0_scale_cntl = RREG32(RADEON_OV0_SCALE_CNTL);
 	WREG32(RADEON_OV0_SCALE_CNTL, ov0_scale_cntl & ~RADEON_SCALER_ENABLE);
 	crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
 	WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl | RADEON_CRTC_DISPLAY_DIS);
 	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
 
 	r100_gpu_wait_for_vsync(rdev);
 
 	WREG32(RADEON_CRTC_GEN_CNTL,
 	       (crtc_gen_cntl & ~(RADEON_CRTC_CUR_EN | RADEON_CRTC_ICON_EN)) |
 	       RADEON_CRTC_DISP_REQ_EN_B | RADEON_CRTC_EXT_DISP_EN);
 
 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
 		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
 
 		r100_gpu_wait_for_vsync2(rdev);
 		WREG32(RADEON_CRTC2_GEN_CNTL,
 		       (crtc2_gen_cntl &
 		        ~(RADEON_CRTC2_CUR_EN | RADEON_CRTC2_ICON_EN)) |
 		       RADEON_CRTC2_DISP_REQ_EN_B);
 	}
 
 	udelay(500);
 }
 
 void r100_mc_setup(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 	int r;
 
 	r = r100_debugfs_mc_info_init(rdev);
 	if (r) {
 		DRM_ERROR("Failed to register debugfs file for R100 MC !\n");
 	}
 	/* Write VRAM size in case we are limiting it */
-	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+	WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+	/* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM,
+	 * if the aperture is 64MB but we have 32MB VRAM
+	 * we report only 32MB VRAM but we have to set MC_FB_LOCATION
+	 * to 64MB, otherwise the gpu accidentially dies */
-	tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1;
+	tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
 	tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16);
 	tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16);
 	WREG32(RADEON_MC_FB_LOCATION, tmp);
 
 	/* Enable bus mastering */
 	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
 	WREG32(RADEON_BUS_CNTL, tmp);
 
 	if (rdev->flags & RADEON_IS_AGP) {
 		tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
 		tmp = REG_SET(RADEON_MC_AGP_TOP, tmp >> 16);
 		tmp |= REG_SET(RADEON_MC_AGP_START, rdev->mc.gtt_location >> 16);
 		WREG32(RADEON_MC_AGP_LOCATION, tmp);
 		WREG32(RADEON_AGP_BASE, rdev->mc.agp_base);
 	} else {
 		WREG32(RADEON_MC_AGP_LOCATION, 0x0FFFFFFF);
 		WREG32(RADEON_AGP_BASE, 0);
 	}
 
 	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
 	tmp |= (7 << 28);
 	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
 	(void)RREG32(RADEON_HOST_PATH_CNTL);
 	WREG32(RADEON_HOST_PATH_CNTL, tmp);
 	(void)RREG32(RADEON_HOST_PATH_CNTL);
 }
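
MC_FB_LOCATION appears to pack the VRAM range as two 16-bit fields carrying address bits 31:16, with START in the low half and TOP (the last byte of the range) in the high half; that is what the REG_SET(RADEON_MC_FB_TOP/START, ...) pair above builds. A stand-alone sketch under that assumed layout:

#include <stdint.h>

/* Illustrative only: base and size are byte quantities. */
static uint32_t mc_fb_location(uint32_t base, uint32_t size)
{
	uint32_t top = base + size - 1;
	return ((top >> 16) << 16) | (base >> 16);
}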
 
 int r100_mc_init(struct radeon_device *rdev)
 {
 	int r;
 
 	if (r100_debugfs_rbbm_init(rdev)) {
 		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
 	}
 
 	r100_gpu_init(rdev);
 	/* Disable gart which also disable out of gart access */
 	r100_pci_gart_disable(rdev);
 
 	/* Setup GPU memory space */
-	rdev->mc.vram_location = 0xFFFFFFFFUL;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
-	if (rdev->flags & RADEON_IS_AGP) {
-		r = radeon_agp_init(rdev);
-		if (r) {
-			printk(KERN_WARNING "[drm] Disabling AGP\n");
-			rdev->flags &= ~RADEON_IS_AGP;
-			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-		} else {
-			rdev->mc.gtt_location = rdev->mc.agp_base;
-		}
-	}
 	r = radeon_mc_setup(rdev);
 	if (r) {
 		return r;
 	}
 
 	r100_mc_disable_clients(rdev);
 	if (r100_mc_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait MC idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 
 	r100_mc_setup(rdev);
 	return 0;
 }
 
 void r100_mc_fini(struct radeon_device *rdev)
 {
-	r100_pci_gart_disable(rdev);
-//   radeon_gart_table_ram_free(rdev);
-//   radeon_gart_fini(rdev);
 }
+
+u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
+{
+	if (crtc == 0)
+		return RREG32(RADEON_CRTC_CRNT_FRAME);
+	else
+		return RREG32(RADEON_CRTC2_CRNT_FRAME);
+}
+
 
258
/*
253
 * Fence emission
259
 * Fence emission
254
 */
260
 */
255
void r100_fence_ring_emit(struct radeon_device *rdev,
261
void r100_fence_ring_emit(struct radeon_device *rdev,
256
			  struct radeon_fence *fence)
262
			  struct radeon_fence *fence)
257
{
263
{
258
	/* Who ever call radeon_fence_emit should call ring_lock and ask
264
	/* Who ever call radeon_fence_emit should call ring_lock and ask
259
	 * for enough space (today caller are ib schedule and buffer move) */
265
	 * for enough space (today caller are ib schedule and buffer move) */
260
	/* Wait until IDLE & CLEAN */
266
	/* Wait until IDLE & CLEAN */
261
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
267
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
262
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
268
	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
263
	/* Emit fence sequence & fire IRQ */
269
	/* Emit fence sequence & fire IRQ */
264
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
270
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
265
	radeon_ring_write(rdev, fence->seq);
271
	radeon_ring_write(rdev, fence->seq);
266
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
272
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
267
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
273
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
268
}
274
}
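
The fence is five ring writes: a wait for idle-and-clean (0x1720 is RADEON_WAIT_UNTIL in this driver's register headers, and bits 16 and 17 are the 2D/3D idle-clean waits), the sequence number written to the driver's scratch register, then RADEON_SW_INT_FIRE to raise a software interrupt for waiters. A sketch of the type-0 packet header these writes rely on, with the field layout assumed from the classic radeon DRM (DWORD count minus one in bits 29:16, register DWORD index in the low bits):

#include <stdint.h>

/* Hypothetical model of PACKET0(reg, n): "write n+1 DWORDs starting
 * at register byte offset reg". */
static uint32_t cp_packet0(uint32_t reg, uint32_t n)
{
	return (0u << 30) | (n << 16) | (reg >> 2);
}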
 
 #if 0
 /*
  * Writeback
  */
 int r100_wb_init(struct radeon_device *rdev)
 {
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
 		r = radeon_object_create(rdev, NULL, 4096,
 					 true,
 					 RADEON_GEM_DOMAIN_GTT,
 					 false, &rdev->wb.wb_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
 			return r;
 		}
 		r = radeon_object_pin(rdev->wb.wb_obj,
 				      RADEON_GEM_DOMAIN_GTT,
 				      &rdev->wb.gpu_addr);
 		if (r) {
 			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
 			return r;
 		}
 		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 		if (r) {
 			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
 			return r;
 		}
 	}
-	WREG32(0x774, rdev->wb.gpu_addr);
-	WREG32(0x70C, rdev->wb.gpu_addr + 1024);
-	WREG32(0x770, 0xff);
+	WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr);
+	WREG32(R_00070C_CP_RB_RPTR_ADDR,
+		S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2));
+	WREG32(R_000770_SCRATCH_UMSK, 0xff);
 	return 0;
 }
+
+void r100_wb_disable(struct radeon_device *rdev)
+{
+	WREG32(R_000770_SCRATCH_UMSK, 0);
+}
 
 void r100_wb_fini(struct radeon_device *rdev)
 {
+	r100_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
 //       radeon_object_kunmap(rdev->wb.wb_obj);
 //       radeon_object_unpin(rdev->wb.wb_obj);
 //       radeon_object_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
 }
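
The writeback hunks above replace the magic offsets 0x774/0x70C/0x770 with generated R_0007xx_* register names and add r100_wb_disable(), which masks scratch writeback through SCRATCH_UMSK. With writeback enabled the CP mirrors scratch-register writes into the wb buffer, so fence polling can be a cached memory read rather than an MMIO read; a hypothetical polling helper, not from this file:

#include <stdbool.h>
#include <stdint.h>

/* Sequence numbers only grow here, so >= detects completion
 * (ignoring 32-bit wraparound for the sake of the sketch). */
static bool fence_signaled(const volatile uint32_t *wb_page,
			   unsigned scratch_idx, uint32_t seq)
{
	return wb_page[scratch_idx] >= seq;
}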
 
 int r100_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset,
 		   uint64_t dst_offset,
 		   unsigned num_pages,
 		   struct radeon_fence *fence)
 {
 	uint32_t cur_pages;
 	uint32_t stride_bytes = PAGE_SIZE;
 	uint32_t pitch;
 	uint32_t stride_pixels;
 	unsigned ndw;
 	int num_loops;
 	int r = 0;
 
 	/* radeon limited to 16k stride */
 	stride_bytes &= 0x3fff;
 	/* radeon pitch is /64 */
 	pitch = stride_bytes / 64;
 	stride_pixels = stride_bytes / 4;
 	num_loops = DIV_ROUND_UP(num_pages, 8191);
 
 	/* Ask for enough room for blit + flush + fence */
 	ndw = 64 + (10 * num_loops);
 	r = radeon_ring_lock(rdev, ndw);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
 		return -EINVAL;
 	}
 	while (num_pages > 0) {
 		cur_pages = num_pages;
 		if (cur_pages > 8191) {
 			cur_pages = 8191;
 		}
 		num_pages -= cur_pages;
 
 		/* pages are in Y direction - height
 		   page width in X direction - width */
 		radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
 		radeon_ring_write(rdev,
 				  RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
 				  RADEON_GMC_DST_PITCH_OFFSET_CNTL |
 				  RADEON_GMC_SRC_CLIPPING |
 				  RADEON_GMC_DST_CLIPPING |
 				  RADEON_GMC_BRUSH_NONE |
 				  (RADEON_COLOR_FORMAT_ARGB8888 << 8) |
 				  RADEON_GMC_SRC_DATATYPE_COLOR |
 				  RADEON_ROP3_S |
 				  RADEON_DP_SRC_SOURCE_MEMORY |
 				  RADEON_GMC_CLR_CMP_CNTL_DIS |
 				  RADEON_GMC_WR_MSK_DIS);
 		radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
 		radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
 		radeon_ring_write(rdev, 0);
 		radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
 		radeon_ring_write(rdev, num_pages);
 		radeon_ring_write(rdev, num_pages);
 		radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
 	}
 	radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
 	radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
 	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
 	radeon_ring_write(rdev,
 			  RADEON_WAIT_2D_IDLECLEAN |
 			  RADEON_WAIT_HOST_IDLECLEAN |
 			  RADEON_WAIT_DMA_GUI_IDLE);
 	if (fence) {
 		r = radeon_fence_emit(rdev, fence);
 	}
 	radeon_ring_unlock_commit(rdev);
 	return r;
 }
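
r100_copy_blit() moves pages with 2D BITBLT_MULTI packets, at most 8191 rows per packet, and reserves ring space before emitting anything. A worked version of that reservation, assuming 10 DWORDs per blit packet as emitted above and 64 DWORDs of slack for the flush/wait/fence tail:

/* ceil(num_pages / 8191) blit loops, 10 DWORDs each, plus slack:
 * 16384 pages -> 2 loops -> 64 + 20 = 84 DWORDs requested. */
static unsigned r100_blit_ndw(unsigned num_pages)
{
	unsigned num_loops = (num_pages + 8190) / 8191;
	return 64 + 10 * num_loops;
}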
 
 #endif
 
 /*
  * CP
  */
+static int r100_cp_wait_for_idle(struct radeon_device *rdev)
+{
+	unsigned i;
+	u32 tmp;
+
+	for (i = 0; i < rdev->usec_timeout; i++) {
+		tmp = RREG32(R_000E40_RBBM_STATUS);
+		if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) {
+			return 0;
+		}
+		udelay(1);
+	}
+	return -1;
+}
+
 void r100_ring_start(struct radeon_device *rdev)
 {
 	int r;
 
 	r = radeon_ring_lock(rdev, 2);
 	if (r) {
 		return;
 	}
 	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
 	radeon_ring_write(rdev,
 			  RADEON_ISYNC_ANY2D_IDLE3D |
 			  RADEON_ISYNC_ANY3D_IDLE2D |
 			  RADEON_ISYNC_WAIT_IDLEGUI |
 			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
 	radeon_ring_unlock_commit(rdev);
 }
 
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
 	int i;
 
 	if (r100_gui_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait GUI idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 
 	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
 	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
 	    (rdev->family == CHIP_RV200) || (rdev->family == CHIP_RS100) ||
 	    (rdev->family == CHIP_RS200)) {
 		DRM_INFO("Loading R100 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			WREG32(RADEON_CP_ME_RAM_DATAH, R100_cp_microcode[i][1]);
 			WREG32(RADEON_CP_ME_RAM_DATAL, R100_cp_microcode[i][0]);
 		}
 	} else if ((rdev->family == CHIP_R200) ||
 		   (rdev->family == CHIP_RV250) ||
 		   (rdev->family == CHIP_RV280) ||
 		   (rdev->family == CHIP_RS300)) {
 		DRM_INFO("Loading R200 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			WREG32(RADEON_CP_ME_RAM_DATAH, R200_cp_microcode[i][1]);
 			WREG32(RADEON_CP_ME_RAM_DATAL, R200_cp_microcode[i][0]);
 		}
 	} else if ((rdev->family == CHIP_R300) ||
 		   (rdev->family == CHIP_R350) ||
 		   (rdev->family == CHIP_RV350) ||
 		   (rdev->family == CHIP_RV380) ||
 		   (rdev->family == CHIP_RS400) ||
 		   (rdev->family == CHIP_RS480)) {
 		DRM_INFO("Loading R300 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			WREG32(RADEON_CP_ME_RAM_DATAH, R300_cp_microcode[i][1]);
 			WREG32(RADEON_CP_ME_RAM_DATAL, R300_cp_microcode[i][0]);
 		}
 	} else if ((rdev->family == CHIP_R420) ||
 		   (rdev->family == CHIP_R423) ||
 		   (rdev->family == CHIP_RV410)) {
 		DRM_INFO("Loading R400 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			WREG32(RADEON_CP_ME_RAM_DATAH, R420_cp_microcode[i][1]);
 			WREG32(RADEON_CP_ME_RAM_DATAL, R420_cp_microcode[i][0]);
 		}
 	} else if ((rdev->family == CHIP_RS690) ||
 		   (rdev->family == CHIP_RS740)) {
 		DRM_INFO("Loading RS690/RS740 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			WREG32(RADEON_CP_ME_RAM_DATAH, RS690_cp_microcode[i][1]);
 			WREG32(RADEON_CP_ME_RAM_DATAL, RS690_cp_microcode[i][0]);
 		}
 	} else if (rdev->family == CHIP_RS600) {
 		DRM_INFO("Loading RS600 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			WREG32(RADEON_CP_ME_RAM_DATAH, RS600_cp_microcode[i][1]);
 			WREG32(RADEON_CP_ME_RAM_DATAL, RS600_cp_microcode[i][0]);
 		}
 	} else if ((rdev->family == CHIP_RV515) ||
 		   (rdev->family == CHIP_R520) ||
 		   (rdev->family == CHIP_RV530) ||
 		   (rdev->family == CHIP_R580) ||
 		   (rdev->family == CHIP_RV560) ||
 		   (rdev->family == CHIP_RV570)) {
 		DRM_INFO("Loading R500 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			WREG32(RADEON_CP_ME_RAM_DATAH, R520_cp_microcode[i][1]);
 			WREG32(RADEON_CP_ME_RAM_DATAL, R520_cp_microcode[i][0]);
 		}
 	}
 }
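
Every pre-R600 family handled above uploads the same way: a 256-entry table of (low, high) DWORD pairs streamed through CP_ME_RAM_DATAL/DATAH after a single write to CP_ME_RAM_ADDR, which apparently auto-increments; only the table differs per family. The repeated loops could plausibly be folded into one helper, sketched here with this file's register names:

/* Sketch only: ucode is an n x 2 table of (low, high) DWORD pairs. */
static void cp_upload_ucode(struct radeon_device *rdev,
			    const uint32_t ucode[][2], int n)
{
	int i;
	WREG32(RADEON_CP_ME_RAM_ADDR, 0);
	for (i = 0; i < n; i++) {
		WREG32(RADEON_CP_ME_RAM_DATAH, ucode[i][1]);
		WREG32(RADEON_CP_ME_RAM_DATAL, ucode[i][0]);
	}
}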
+
+static int r100_cp_init_microcode(struct radeon_device *rdev)
+{
+    return 0;
+}
+
 
 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
 {
 	unsigned rb_bufsz;
 	unsigned rb_blksz;
 	unsigned max_fetch;
 	unsigned pre_write_timer;
 	unsigned pre_write_limit;
 	unsigned indirect2_start;
 	unsigned indirect1_start;
 	uint32_t tmp;
 	int r;
 
 	if (r100_debugfs_cp_init(rdev)) {
 		DRM_ERROR("Failed to register debugfs file for CP !\n");
 	}
 	/* Reset CP */
 	tmp = RREG32(RADEON_CP_CSQ_STAT);
 	if ((tmp & (1 << 31))) {
 		DRM_INFO("radeon: cp busy (0x%08X) resetting\n", tmp);
 		WREG32(RADEON_CP_CSQ_MODE, 0);
 		WREG32(RADEON_CP_CSQ_CNTL, 0);
 		WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
 		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
 		mdelay(2);
 		WREG32(RADEON_RBBM_SOFT_RESET, 0);
 		tmp = RREG32(RADEON_RBBM_SOFT_RESET);
 		mdelay(2);
 		tmp = RREG32(RADEON_CP_CSQ_STAT);
 		if ((tmp & (1 << 31))) {
 			DRM_INFO("radeon: cp reset failed (0x%08X)\n", tmp);
 		}
 	} else {
 		DRM_INFO("radeon: cp idle (0x%08X)\n", tmp);
 	}
+
+	if (!rdev->me_fw) {
+		r = r100_cp_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
 	/* Align ring size */
 	rb_bufsz = drm_order(ring_size / 8);
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
 	r100_cp_load_microcode(rdev);
 	r = radeon_ring_init(rdev, ring_size);
 	if (r) {
 		return r;
 	}
 	/* Each time the cp read 1024 bytes (16 dword/quadword) update
 	 * the rptr copy in system ram */
 	rb_blksz = 9;
 	/* cp will read 128bytes at a time (4 dwords) */
 	max_fetch = 1;
 	rdev->cp.align_mask = 16 - 1;
 	/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
 	pre_write_timer = 64;
 	/* Force CP_RB_WPTR write if written more than one time before the
 	 * delay expire
 	 */
 	pre_write_limit = 0;
 	/* Setup the cp cache like this (cache size is 96 dwords) :
 	 *	RING		0  to 15
 	 *	INDIRECT1	16 to 79
 	 *	INDIRECT2	80 to 95
 	 * So ring cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
 	 *    indirect1 cache size is 64dwords (> (2 * max_fetch = 2 * 4dwords))
 	 *    indirect2 cache size is 16dwords (> (2 * max_fetch = 2 * 4dwords))
 	 * Idea being that most of the gpu cmd will be through indirect1 buffer
 	 * so it gets the bigger cache.
 	 */
 	indirect2_start = 80;
 	indirect1_start = 16;
 	/* cp setup */
 	WREG32(0x718, pre_write_timer | (pre_write_limit << 28));
 	WREG32(RADEON_CP_RB_CNTL,
 #ifdef __BIG_ENDIAN
 	       RADEON_BUF_SWAP_32BIT |
 #endif
 	       REG_SET(RADEON_RB_BUFSZ, rb_bufsz) |
 	       REG_SET(RADEON_RB_BLKSZ, rb_blksz) |
 	       REG_SET(RADEON_MAX_FETCH, max_fetch) |
 	       RADEON_RB_NO_UPDATE);
 	/* Set ring address */
 	DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
 	WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
 	/* Force read & write ptr to 0 */
 	tmp = RREG32(RADEON_CP_RB_CNTL);
 	WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
 	WREG32(RADEON_CP_RB_RPTR_WR, 0);
 	WREG32(RADEON_CP_RB_WPTR, 0);
 	WREG32(RADEON_CP_RB_CNTL, tmp);
 	udelay(10);
 	rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
 	rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
 	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
 	WREG32(0x718, 0);
 	WREG32(0x744, 0x00004D4D);
 	WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
 	radeon_ring_start(rdev);
 	r = radeon_ring_test(rdev);
 	if (r) {
 		DRM_ERROR("radeon: cp isn't working (%d).\n", r);
 		return r;
 	}
 	rdev->cp.ready = true;
 	return 0;
 }
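
The ring size is forced to a power of two: rb_bufsz = drm_order(ring_size / 8) takes a ceiling log2 in 8-byte units and the byte size is then re-derived from it. Worked example with a local stand-in for drm_order(), assumed to behave like the DRM helper (ceiling of log2):

/* order_ceil(131072) = 17, so a requested 1 MiB ring stays
 * (1 << (17 + 1)) * 4 = 1 MiB; a 1.5 MiB request rounds up to 2 MiB. */
static unsigned order_ceil(unsigned n)
{
	unsigned o = 0;
	while ((1u << o) < n)
		o++;
	return o;
}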
 
 void r100_cp_fini(struct radeon_device *rdev)
 {
+	if (r100_cp_wait_for_idle(rdev)) {
+		DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n");
+	}
 	/* Disable ring */
-	rdev->cp.ready = false;
-	WREG32(RADEON_CP_CSQ_CNTL, 0);
+	r100_cp_disable(rdev);
 	radeon_ring_fini(rdev);
 	DRM_INFO("radeon: cp finalized\n");
 }
 
 void r100_cp_disable(struct radeon_device *rdev)
 {
 	/* Disable ring */
 	rdev->cp.ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
 	if (r100_gui_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait GUI idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 }
 
 int r100_cp_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 	bool reinit_cp;
 	int i;
 
-    dbgprintf("%s\n",__FUNCTION__);
+    ENTER();
 
 	reinit_cp = rdev->cp.ready;
 	rdev->cp.ready = false;
 	WREG32(RADEON_CP_CSQ_MODE, 0);
 	WREG32(RADEON_CP_CSQ_CNTL, 0);
 	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_CP);
 	(void)RREG32(RADEON_RBBM_SOFT_RESET);
 	udelay(200);
 	WREG32(RADEON_RBBM_SOFT_RESET, 0);
 	/* Wait to prevent race in RBBM_STATUS */
 	mdelay(1);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(RADEON_RBBM_STATUS);
 		if (!(tmp & (1 << 16))) {
 			DRM_INFO("CP reset succeed (RBBM_STATUS=0x%08X)\n",
 				 tmp);
 			if (reinit_cp) {
 				return r100_cp_init(rdev, rdev->cp.ring_size);
 			}
 			return 0;
 		}
 		DRM_UDELAY(1);
 	}
 	tmp = RREG32(RADEON_RBBM_STATUS);
 	DRM_ERROR("Failed to reset CP (RBBM_STATUS=0x%08X)!\n", tmp);
 	return -1;
 }
+
+void r100_cp_commit(struct radeon_device *rdev)
+{
+	WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+	(void)RREG32(RADEON_CP_RB_WPTR);
+}
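
The new r100_cp_commit() publishes the CPU-side write pointer and immediately reads the register back; the read-back flushes the posted MMIO write so the CP observes the new WPTR promptly. Illustrative call order only (whether the generic commit path dispatches to this helper through the asic table is an assumption here):

	r = radeon_ring_lock(rdev, 2);
	if (!r) {
		radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
		radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN);
		radeon_ring_unlock_commit(rdev); /* presumably ends in r100_cp_commit() */
	}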
 
 
 #if 0
 /*
  * CS functions
  */
 int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 			  struct radeon_cs_packet *pkt,
 			  const unsigned *auth, unsigned n,
 			  radeon_packet0_check_t check)
 {
 	unsigned reg;
 	unsigned i, j, m;
 	unsigned idx;
 	int r;
 
 	idx = pkt->idx + 1;
 	reg = pkt->reg;
 	/* Check that register fall into register range
 	 * determined by the number of entry (n) in the
 	 * safe register bitmap.
 	 */
 	if (pkt->one_reg_wr) {
 		if ((reg >> 7) > n) {
 			return -EINVAL;
 		}
 	} else {
 		if (((reg + (pkt->count << 2)) >> 7) > n) {
 			return -EINVAL;
 		}
 	}
 	for (i = 0; i <= pkt->count; i++, idx++) {
 		j = (reg >> 7);
 		m = 1 << ((reg >> 2) & 31);
 		if (auth[j] & m) {
 			r = check(p, pkt, idx, reg);
 			if (r) {
 				return r;
 			}
 		}
 		if (pkt->one_reg_wr) {
 			if (!(auth[j] & m)) {
 				break;
 			}
 		} else {
 			reg += 4;
 		}
 	}
 	return 0;
 }
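
Packet0 validation is a bitmap lookup: each 32-bit word of the auth table covers 32 DWORD registers (128 bytes of register space), so reg >> 7 selects the word and (reg >> 2) & 31 the bit; offset 0x1720, for example, tests bit 8 of word 46. The tables come from the generated r100_reg_safe.h / rn50_reg_safe.h headers now included at the top of the file. Stand-alone model of the test:

#include <stdbool.h>
#include <stdint.h>

static bool reg_is_safe(const uint32_t *auth, unsigned reg)
{
	return (auth[reg >> 7] >> ((reg >> 2) & 31)) & 1;
}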
 
 void r100_cs_dump_packet(struct radeon_cs_parser *p,
 			 struct radeon_cs_packet *pkt)
 {
 	struct radeon_cs_chunk *ib_chunk;
 	volatile uint32_t *ib;
 	unsigned i;
 	unsigned idx;
 
 	ib = p->ib->ptr;
 	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx;
 	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
 		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
 	}
 }
 
 /**
  * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
  * @parser:	parser structure holding parsing context.
  * @pkt:	where to store packet informations
  *
  * Assume that chunk_ib_index is properly set. Will return -EINVAL
  * if packet is bigger than remaining ib size. or if packets is unknown.
  **/
 int r100_cs_packet_parse(struct radeon_cs_parser *p,
 			 struct radeon_cs_packet *pkt,
 			 unsigned idx)
 {
 	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-	uint32_t header = ib_chunk->kdata[idx];
+	uint32_t header;
 
 	if (idx >= ib_chunk->length_dw) {
 		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
 			  idx, ib_chunk->length_dw);
 		return -EINVAL;
 	}
+	header = ib_chunk->kdata[idx];
 	pkt->idx = idx;
 	pkt->type = CP_PACKET_GET_TYPE(header);
 	pkt->count = CP_PACKET_GET_COUNT(header);
 	switch (pkt->type) {
 	case PACKET_TYPE0:
 		pkt->reg = CP_PACKET0_GET_REG(header);
 		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
 		break;
 	case PACKET_TYPE3:
 		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
 		break;
 	case PACKET_TYPE2:
 		pkt->count = -1;
 		break;
 	default:
 		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
 		return -EINVAL;
 	}
 	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
 		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
 			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
 		return -EINVAL;
 	}
 	return 0;
 }
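
The hunk above is a fix rather than a cleanup: rev 1129 dereferenced ib_chunk->kdata[idx] in the initializer, before the idx >= length_dw bounds check, so a malformed command stream could trigger a read past the chunk. Rev 1179 declares header first and assigns it only after validation. A minimal stand-alone model of the corrected order:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical: validate idx before touching kdata, as rev 1179 does. */
static int read_header(const uint32_t *kdata, size_t length_dw,
		       size_t idx, uint32_t *header)
{
	if (idx >= length_dw)
		return -1; /* caller maps this to -EINVAL */
	*header = kdata[idx];
	return 0;
}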
 
 /**
+ * r100_cs_packet_next_vline() - parse userspace VLINE packet
+ * @parser:		parser structure holding parsing context.
+ *
+ * Userspace sends a special sequence for VLINE waits.
+ * PACKET0 - VLINE_START_END + value
+ * PACKET0 - WAIT_UNTIL +_value
+ * RELOC (P3) - crtc_id in reloc.
+ *
+ * This function parses this and relocates the VLINE START END
+ * and WAIT UNTIL packets to the correct crtc.
+ * It also detects a switched off crtc and nulls out the
+ * wait in that case.
+ */
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
+{
+	struct radeon_cs_chunk *ib_chunk;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	struct radeon_cs_packet p3reloc, waitreloc;
+	int crtc_id;
+	int r;
+	uint32_t header, h_idx, reg;
+
+	ib_chunk = &p->chunks[p->chunk_ib_idx];
+
+	/* parse the wait until */
+	r = r100_cs_packet_parse(p, &waitreloc, p->idx);
+	if (r)
+		return r;
+
+	/* check its a wait until and only 1 count */
+	if (waitreloc.reg != RADEON_WAIT_UNTIL ||
+	    waitreloc.count != 0) {
+		DRM_ERROR("vline wait had illegal wait until segment\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
+		DRM_ERROR("vline wait had illegal wait until\n");
+		r = -EINVAL;
+		return r;
+	}
+
+	/* jump over the NOP */
+	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
+	if (r)
+		return r;
+
+	h_idx = p->idx - 2;
+	p->idx += waitreloc.count;
+	p->idx += p3reloc.count;
+
+	header = ib_chunk->kdata[h_idx];
+	crtc_id = ib_chunk->kdata[h_idx + 5];
+	reg = ib_chunk->kdata[h_idx] >> 2;
+	mutex_lock(&p->rdev->ddev->mode_config.mutex);
+	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj) {
+		DRM_ERROR("cannot find crtc %d\n", crtc_id);
+		r = -EINVAL;
+		goto out;
+	}
+	crtc = obj_to_crtc(obj);
+	radeon_crtc = to_radeon_crtc(crtc);
+	crtc_id = radeon_crtc->crtc_id;
+
+	if (!crtc->enabled) {
+		/* if the CRTC isn't enabled - we need to nop out the wait until */
+		ib_chunk->kdata[h_idx + 2] = PACKET2(0);
+		ib_chunk->kdata[h_idx + 3] = PACKET2(0);
+	} else if (crtc_id == 1) {
+		switch (reg) {
+		case AVIVO_D1MODE_VLINE_START_END:
+			header &= R300_CP_PACKET0_REG_MASK;
+			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
+			break;
+		case RADEON_CRTC_GUI_TRIG_VLINE:
+			header &= R300_CP_PACKET0_REG_MASK;
+			header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
+			break;
+		default:
+			DRM_ERROR("unknown crtc reloc\n");
+			r = -EINVAL;
+			goto out;
+		}
+		ib_chunk->kdata[h_idx] = header;
+		ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
+	}
+out:
+	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
+	return r;
+}
+
+/**
-
 
905
 
-
 
906
/**
762
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
907
 * r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3
763
 * @parser:		parser structure holding parsing context.
908
 * @parser:		parser structure holding parsing context.
764
 * @data:		pointer to relocation data
909
 * @data:		pointer to relocation data
765
 * @offset_start:	starting offset
910
 * @offset_start:	starting offset
766
 * @offset_mask:	offset mask (to align start offset on)
911
 * @offset_mask:	offset mask (to align start offset on)
767
 * @reloc:		reloc informations
912
 * @reloc:		reloc informations
768
 *
913
 *
769
 * Check next packet is relocation packet3, do bo validation and compute
914
 * Check next packet is relocation packet3, do bo validation and compute
770
 * GPU offset using the provided start.
915
 * GPU offset using the provided start.
771
 **/
916
 **/
772
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
917
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
773
			      struct radeon_cs_reloc **cs_reloc)
918
			      struct radeon_cs_reloc **cs_reloc)
774
{
919
{
775
	struct radeon_cs_chunk *ib_chunk;
920
	struct radeon_cs_chunk *ib_chunk;
776
	struct radeon_cs_chunk *relocs_chunk;
921
	struct radeon_cs_chunk *relocs_chunk;
777
	struct radeon_cs_packet p3reloc;
922
	struct radeon_cs_packet p3reloc;
778
	unsigned idx;
923
	unsigned idx;
779
	int r;
924
	int r;
780
 
925
 
781
	if (p->chunk_relocs_idx == -1) {
926
	if (p->chunk_relocs_idx == -1) {
782
		DRM_ERROR("No relocation chunk !\n");
927
		DRM_ERROR("No relocation chunk !\n");
783
		return -EINVAL;
928
		return -EINVAL;
784
	}
929
	}
785
	*cs_reloc = NULL;
930
	*cs_reloc = NULL;
786
	ib_chunk = &p->chunks[p->chunk_ib_idx];
931
	ib_chunk = &p->chunks[p->chunk_ib_idx];
787
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
932
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
788
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
933
	r = r100_cs_packet_parse(p, &p3reloc, p->idx);
789
	if (r) {
934
	if (r) {
790
		return r;
935
		return r;
791
	}
936
	}
792
	p->idx += p3reloc.count + 2;
937
	p->idx += p3reloc.count + 2;
793
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
938
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
794
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
939
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
795
			  p3reloc.idx);
940
			  p3reloc.idx);
796
		r100_cs_dump_packet(p, &p3reloc);
941
		r100_cs_dump_packet(p, &p3reloc);
797
		return -EINVAL;
942
		return -EINVAL;
798
	}
943
	}
799
	idx = ib_chunk->kdata[p3reloc.idx + 1];
944
	idx = ib_chunk->kdata[p3reloc.idx + 1];
800
	if (idx >= relocs_chunk->length_dw) {
945
	if (idx >= relocs_chunk->length_dw) {
801
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
946
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
802
			  idx, relocs_chunk->length_dw);
947
			  idx, relocs_chunk->length_dw);
803
		r100_cs_dump_packet(p, &p3reloc);
948
		r100_cs_dump_packet(p, &p3reloc);
804
		return -EINVAL;
949
		return -EINVAL;
805
	}
950
	}
806
	/* FIXME: we assume reloc size is 4 dwords */
951
	/* FIXME: we assume reloc size is 4 dwords */
807
	*cs_reloc = p->relocs_ptr[(idx / 4)];
952
	*cs_reloc = p->relocs_ptr[(idx / 4)];
808
	return 0;
953
	return 0;
809
}
954
}
+
+static int r100_get_vtx_size(uint32_t vtx_fmt)
+{
+	int vtx_size;
+	vtx_size = 2;
+	/* ordered according to bits in spec */
+	if (vtx_fmt & RADEON_SE_VTX_FMT_W0)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST0)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST1)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST2)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q2)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_ST3)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q3)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Q0)
+		vtx_size++;
+	/* blend weight */
+	if (vtx_fmt & (0x7 << 15))
+		vtx_size += (vtx_fmt >> 15) & 0x7;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_N0)
+		vtx_size += 3;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_XY1)
+		vtx_size += 2;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Z1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_W1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_N1)
+		vtx_size++;
+	if (vtx_fmt & RADEON_SE_VTX_FMT_Z)
+		vtx_size++;
+	return vtx_size;
+}
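For reference on the decoding above: the function starts from the implicit 2-dword XY base and accumulates per-component sizes from the format bits. A worked sketch, assuming only RADEON_SE_VTX_FMT_Z and RADEON_SE_VTX_FMT_ST0 are set (the values are illustrative, not taken from a real command stream):

	/* 2 (x,y) + 1 (z) + 2 (one s,t pair) = 5 dwords per vertex */
	uint32_t fmt = RADEON_SE_VTX_FMT_Z | RADEON_SE_VTX_FMT_ST0;
	int dwords_per_vertex = r100_get_vtx_size(fmt);	/* yields 5 */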
 
 static int r100_packet0_check(struct radeon_cs_parser *p,
-			      struct radeon_cs_packet *pkt)
+			      struct radeon_cs_packet *pkt,
+			      unsigned idx, unsigned reg)
 {
 	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
 	volatile uint32_t *ib;
 	uint32_t tmp;
-	unsigned reg;
-	unsigned i;
-	unsigned idx;
-	bool onereg;
 	int r;
+	int i, face;
+	u32 tile_flags = 0;
 
 	ib = p->ib->ptr;
 	ib_chunk = &p->chunks[p->chunk_ib_idx];
-	idx = pkt->idx + 1;
-	reg = pkt->reg;
-	onereg = false;
-	if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) {
-		onereg = true;
-	}
-	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
+	track = (struct r100_cs_track *)p->track;
+
 		switch (reg) {
+		case RADEON_CRTC_GUI_TRIG_VLINE:
+			r = r100_cs_packet_parse_vline(p);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+						idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+			break;
 		/* FIXME: only allow PACKET3 blit? easier to check for out of
 		 * range access */
 		case RADEON_DST_PITCH_OFFSET:
 		case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
+		break;
+	case RADEON_RB3D_DEPTHOFFSET:
 			r = r100_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 					  idx, reg);
 				r100_cs_dump_packet(p, pkt);
 				return r;
 			}
-			tmp = ib_chunk->kdata[idx] & 0x003fffff;
-			tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-			ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+		track->zb.robj = reloc->robj;
+		track->zb.offset = ib_chunk->kdata[idx];
+		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
 			break;
-		case RADEON_RB3D_DEPTHOFFSET:
 		case RADEON_RB3D_COLOROFFSET:
-		case R300_RB3D_COLOROFFSET0:
-		case R300_ZB_DEPTHOFFSET:
-		case R200_PP_TXOFFSET_0:
-		case R200_PP_TXOFFSET_1:
-		case R200_PP_TXOFFSET_2:
-		case R200_PP_TXOFFSET_3:
-		case R200_PP_TXOFFSET_4:
-		case R200_PP_TXOFFSET_5:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->cb[0].robj = reloc->robj;
+		track->cb[0].offset = ib_chunk->kdata[idx];
+		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		break;
 		case RADEON_PP_TXOFFSET_0:
 		case RADEON_PP_TXOFFSET_1:
 		case RADEON_PP_TXOFFSET_2:
-		case R300_TX_OFFSET_0:
-		case R300_TX_OFFSET_0+4:
-		case R300_TX_OFFSET_0+8:
-		case R300_TX_OFFSET_0+12:
-		case R300_TX_OFFSET_0+16:
-		case R300_TX_OFFSET_0+20:
+		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[i].robj = reloc->robj;
+		break;
+	case RADEON_PP_CUBIC_OFFSET_T0_0:
+	case RADEON_PP_CUBIC_OFFSET_T0_1:
+	case RADEON_PP_CUBIC_OFFSET_T0_2:
+	case RADEON_PP_CUBIC_OFFSET_T0_3:
+	case RADEON_PP_CUBIC_OFFSET_T0_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
+		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[0].cube_info[i].robj = reloc->robj;
+		break;
-		case R300_TX_OFFSET_0+24:
-		case R300_TX_OFFSET_0+28:
-		case R300_TX_OFFSET_0+32:
-		case R300_TX_OFFSET_0+36:
-		case R300_TX_OFFSET_0+40:
+	case RADEON_PP_CUBIC_OFFSET_T1_0:
+	case RADEON_PP_CUBIC_OFFSET_T1_1:
+	case RADEON_PP_CUBIC_OFFSET_T1_2:
+	case RADEON_PP_CUBIC_OFFSET_T1_3:
+	case RADEON_PP_CUBIC_OFFSET_T1_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4;
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+			}
+		track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
+		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[1].cube_info[i].robj = reloc->robj;
+		break;
-		case R300_TX_OFFSET_0+44:
-		case R300_TX_OFFSET_0+48:
-		case R300_TX_OFFSET_0+52:
-		case R300_TX_OFFSET_0+56:
-		case R300_TX_OFFSET_0+60:
+	case RADEON_PP_CUBIC_OFFSET_T2_0:
+	case RADEON_PP_CUBIC_OFFSET_T2_1:
+	case RADEON_PP_CUBIC_OFFSET_T2_2:
+	case RADEON_PP_CUBIC_OFFSET_T2_3:
+	case RADEON_PP_CUBIC_OFFSET_T2_4:
+		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
 			r = r100_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 					  idx, reg);
 				r100_cs_dump_packet(p, pkt);
 				return r;
 			}
+		track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
 			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->textures[2].cube_info[i].robj = reloc->robj;
+		break;
+	case RADEON_RE_WIDTH_HEIGHT:
+		track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
+			break;
+		case RADEON_RB3D_COLORPITCH:
+			r = r100_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= RADEON_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
+
+			tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+
+		track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
+		break;
+	case RADEON_RB3D_DEPTHPITCH:
+		track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
+		break;
+	case RADEON_RB3D_CNTL:
+		switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
+		case 7:
+		case 8:
+		case 9:
+		case 11:
+		case 12:
+			track->cb[0].cpp = 1;
+			break;
+		case 3:
+		case 4:
+		case 15:
+			track->cb[0].cpp = 2;
+			break;
+		case 6:
+			track->cb[0].cpp = 4;
+			break;
+		default:
+			DRM_ERROR("Invalid color buffer format (%d) !\n",
+				  ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
+			return -EINVAL;
+		}
+		track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
+		break;
+	case RADEON_RB3D_ZSTENCILCNTL:
+		switch (ib_chunk->kdata[idx] & 0xf) {
+		case 0:
+			track->zb.cpp = 2;
+			break;
+		case 2:
+		case 3:
+		case 4:
+		case 5:
+		case 9:
+		case 11:
+			track->zb.cpp = 4;
 			break;
 		default:
-			/* FIXME: we don't want to allow anyothers packet */
 			break;
 		}
-		if (onereg) {
-			/* FIXME: forbid onereg write to register on relocate */
 			break;
+		case RADEON_RB3D_ZPASS_ADDR:
+			r = r100_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+			ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+			break;
+	case RADEON_PP_CNTL:
+		{
+			uint32_t temp = ib_chunk->kdata[idx] >> 4;
+			for (i = 0; i < track->num_texture; i++)
+				track->textures[i].enabled = !!(temp & (1 << i));
+		}
+			break;
+	case RADEON_SE_VF_CNTL:
+		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		break;
+	case RADEON_SE_VTX_FMT:
+		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
+		break;
+	case RADEON_PP_TEX_SIZE_0:
+	case RADEON_PP_TEX_SIZE_1:
+	case RADEON_PP_TEX_SIZE_2:
+		i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
+		track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
+		track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
+		break;
+	case RADEON_PP_TEX_PITCH_0:
+	case RADEON_PP_TEX_PITCH_1:
+	case RADEON_PP_TEX_PITCH_2:
+		i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
+		track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
+		break;
+	case RADEON_PP_TXFILTER_0:
+	case RADEON_PP_TXFILTER_1:
+	case RADEON_PP_TXFILTER_2:
+		i = (reg - RADEON_PP_TXFILTER_0) / 24;
+		track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
+						 >> RADEON_MAX_MIP_LEVEL_SHIFT);
+		tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_w = false;
+		tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
+		if (tmp == 2 || tmp == 6)
+			track->textures[i].roundup_h = false;
+		break;
+	case RADEON_PP_TXFORMAT_0:
+	case RADEON_PP_TXFORMAT_1:
+	case RADEON_PP_TXFORMAT_2:
+		i = (reg - RADEON_PP_TXFORMAT_0) / 24;
+		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
+			track->textures[i].use_pitch = 1;
+		} else {
+			track->textures[i].use_pitch = 0;
+			track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
+			track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
+		}
+		if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
+			track->textures[i].tex_coord_type = 2;
+		switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
+		case RADEON_TXFORMAT_I8:
+		case RADEON_TXFORMAT_RGB332:
+		case RADEON_TXFORMAT_Y8:
+			track->textures[i].cpp = 1;
+			break;
+		case RADEON_TXFORMAT_AI88:
+		case RADEON_TXFORMAT_ARGB1555:
+		case RADEON_TXFORMAT_RGB565:
+		case RADEON_TXFORMAT_ARGB4444:
+		case RADEON_TXFORMAT_VYUY422:
+		case RADEON_TXFORMAT_YVYU422:
+		case RADEON_TXFORMAT_DXT1:
+		case RADEON_TXFORMAT_SHADOW16:
+		case RADEON_TXFORMAT_LDUDV655:
+		case RADEON_TXFORMAT_DUDV88:
+			track->textures[i].cpp = 2;
+			break;
+		case RADEON_TXFORMAT_ARGB8888:
+		case RADEON_TXFORMAT_RGBA8888:
+		case RADEON_TXFORMAT_DXT23:
+		case RADEON_TXFORMAT_DXT45:
+		case RADEON_TXFORMAT_SHADOW32:
+		case RADEON_TXFORMAT_LDUDUV8888:
+			track->textures[i].cpp = 4;
+			break;
+		}
+		track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
+		track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
+		break;
+	case RADEON_PP_CUBIC_FACES_0:
+	case RADEON_PP_CUBIC_FACES_1:
+	case RADEON_PP_CUBIC_FACES_2:
+		tmp = ib_chunk->kdata[idx];
+		i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
+		for (face = 0; face < 4; face++) {
+			track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
+			track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
 		}
+		break;
+	default:
+		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
+		       reg, idx);
+		return -EINVAL;
 	}
 	return 0;
 }
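A note on the index arithmetic in the texture cases above: judging from the divisions in the code, the per-unit registers (TXOFFSET/TXFILTER/TXFORMAT) are spaced 24 bytes apart per texture unit, and TEX_SIZE/TEX_PITCH 8 bytes apart, so the unit number falls out of a simple division. An illustrative restatement:

	/* illustrative: recover which of the three texture units a register hits */
	unsigned unit = (reg - RADEON_PP_TXOFFSET_0) / 24;	/* 0, 1 or 2 */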
 
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 					 struct radeon_cs_packet *pkt,
 					 struct radeon_object *robj)
 {
 	struct radeon_cs_chunk *ib_chunk;
 	unsigned idx;
 
 	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx + 1;
 	if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
 			  "(need %u have %lu) !\n",
 			  ib_chunk->kdata[idx+2] + 1,
 			  radeon_object_size(robj));
 		return -EINVAL;
 	}
 	return 0;
 }
 
 static int r100_packet3_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt)
 {
 	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
+	struct r100_cs_track *track;
 	unsigned idx;
 	unsigned i, c;
 	volatile uint32_t *ib;
 	int r;
 
 	ib = p->ib->ptr;
 	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx + 1;
+	track = (struct r100_cs_track *)p->track;
 	switch (pkt->opcode) {
 	case PACKET3_3D_LOAD_VBPNTR:
 		c = ib_chunk->kdata[idx++];
+		track->num_arrays = c;
 		for (i = 0; i < (c - 1); i += 2, idx += 3) {
 			r = r100_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("No reloc for packet3 %d\n",
 					  pkt->opcode);
 				r100_cs_dump_packet(p, pkt);
 				return r;
 			}
 			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+			track->arrays[i + 0].robj = reloc->robj;
+			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
+			track->arrays[i + 0].esize &= 0x7F;
 			r = r100_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("No reloc for packet3 %d\n",
 					  pkt->opcode);
 				r100_cs_dump_packet(p, pkt);
 				return r;
 			}
 			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
+			track->arrays[i + 1].robj = reloc->robj;
+			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
+			track->arrays[i + 1].esize &= 0x7F;
 		}
 		if (c & 1) {
 			r = r100_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				DRM_ERROR("No reloc for packet3 %d\n",
 					  pkt->opcode);
 				r100_cs_dump_packet(p, pkt);
 				return r;
 			}
 			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
+			track->arrays[i + 0].robj = reloc->robj;
+			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
+			track->arrays[i + 0].esize &= 0x7F;
 		}
 		break;
 	case PACKET3_INDX_BUFFER:
 		r = r100_cs_packet_next_reloc(p, &reloc);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
 		if (r) {
 			return r;
 		}
 		break;
 	case 0x23:
-		/* FIXME: cleanup */
 		/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */
 		r = r100_cs_packet_next_reloc(p, &reloc);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
 		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		track->num_arrays = 1;
+		track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);
+
+		track->arrays[0].robj = reloc->robj;
+		track->arrays[0].esize = track->vtx_size;
+
+		track->max_indx = ib_chunk->kdata[idx+1];
+
+		track->vap_vf_cntl = ib_chunk->kdata[idx+3];
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
 		break;
 	case PACKET3_3D_DRAW_IMMD:
+		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
+		track->immd_dwords = pkt->count - 1;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
 		/* triggers drawing using in-packet vertex data */
 	case PACKET3_3D_DRAW_IMMD_2:
+		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
+			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
+			return -EINVAL;
+		}
+		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		track->immd_dwords = pkt->count;
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
 		/* triggers drawing using in-packet vertex data */
 	case PACKET3_3D_DRAW_VBUF_2:
+		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
 		/* triggers drawing of vertex buffers setup elsewhere */
 	case PACKET3_3D_DRAW_INDX_2:
+		track->vap_vf_cntl = ib_chunk->kdata[idx];
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
 		/* triggers drawing using indices to vertex buffer */
 	case PACKET3_3D_DRAW_VBUF:
+		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
 		/* triggers drawing of vertex buffers setup elsewhere */
 	case PACKET3_3D_DRAW_INDX:
+		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
+		r = r100_cs_track_check(p->rdev, track);
+		if (r)
+			return r;
+		break;
 		/* triggers drawing using indices to vertex buffer */
 	case PACKET3_NOP:
 		break;
 	default:
 		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
 		return -EINVAL;
 	}
 	return 0;
 }
 
 int r100_cs_parse(struct radeon_cs_parser *p)
 {
 	struct radeon_cs_packet pkt;
+	struct r100_cs_track *track;
 	int r;
+
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	r100_cs_track_clear(p->rdev, track);
-
+	p->track = track;
 	do {
 		r = r100_cs_packet_parse(p, &pkt, p->idx);
 		if (r) {
 			return r;
 		}
 		p->idx += pkt.count + 2;
 		switch (pkt.type) {
 			case PACKET_TYPE0:
-				r = r100_packet0_check(p, &pkt);
+				if (p->rdev->family >= CHIP_R200)
+					r = r100_cs_parse_packet0(p, &pkt,
+								  p->rdev->config.r100.reg_safe_bm,
+								  p->rdev->config.r100.reg_safe_bm_size,
+								  &r200_packet0_check);
+				else
+					r = r100_cs_parse_packet0(p, &pkt,
+								  p->rdev->config.r100.reg_safe_bm,
+								  p->rdev->config.r100.reg_safe_bm_size,
+								  &r100_packet0_check);
 				break;
 			case PACKET_TYPE2:
 				break;
 			case PACKET_TYPE3:
 				r = r100_packet3_check(p, &pkt);
 				break;
 			default:
 				DRM_ERROR("Unknown packet type %d !\n",
 					  pkt.type);
 				return -EINVAL;
 		}
 		if (r) {
 			return r;
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 	return 0;
 }
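The reg_safe_bm arrays passed above come from the generated r100_reg_safe.h / rn50_reg_safe.h headers included at the top of the file. A plausible sketch of the lookup the shared walker performs (names below are illustrative; the real helper is r100_cs_parse_packet0, whose body is not part of this diff): registers are 4-byte aligned, so reg >> 2 is the register index and each 32-bit bitmap word covers 128 bytes of register space:

	/* illustrative: a set bit means the register still needs a per-register check */
	static int reg_needs_check(const unsigned *bm, unsigned n, unsigned reg)
	{
		unsigned word = reg >> 7;		/* 32 registers per bitmap word */
		unsigned bit  = (reg >> 2) & 31;
		if (word >= n)
			return -EINVAL;		/* register outside the covered range */
		return (bm[word] & (1 << bit)) != 0;
	}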
 
 #endif
 
 /*
  * Global GPU functions
  */
 void r100_errata(struct radeon_device *rdev)
 {
 	rdev->pll_errata = 0;
 
 	if (rdev->family == CHIP_RV200 || rdev->family == CHIP_RS200) {
 		rdev->pll_errata |= CHIP_ERRATA_PLL_DUMMYREADS;
 	}
 
 	if (rdev->family == CHIP_RV100 ||
 	    rdev->family == CHIP_RS100 ||
 	    rdev->family == CHIP_RS200) {
 		rdev->pll_errata |= CHIP_ERRATA_PLL_DELAY;
 	}
 }
-
-
 
 /* Wait for vertical sync on primary CRTC */
 void r100_gpu_wait_for_vsync(struct radeon_device *rdev)
 {
 	uint32_t crtc_gen_cntl, tmp;
 	int i;
 
 	crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL);
 	if ((crtc_gen_cntl & RADEON_CRTC_DISP_REQ_EN_B) ||
 	    !(crtc_gen_cntl & RADEON_CRTC_EN)) {
 		return;
 	}
 	/* Clear the CRTC_VBLANK_SAVE bit */
 	WREG32(RADEON_CRTC_STATUS, RADEON_CRTC_VBLANK_SAVE_CLEAR);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(RADEON_CRTC_STATUS);
 		if (tmp & RADEON_CRTC_VBLANK_SAVE) {
 			return;
 		}
 		DRM_UDELAY(1);
 	}
 }
 
 /* Wait for vertical sync on secondary CRTC */
 void r100_gpu_wait_for_vsync2(struct radeon_device *rdev)
 {
 	uint32_t crtc2_gen_cntl, tmp;
 	int i;
 
 	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
 	if ((crtc2_gen_cntl & RADEON_CRTC2_DISP_REQ_EN_B) ||
 	    !(crtc2_gen_cntl & RADEON_CRTC2_EN))
 		return;
 
 	/* Clear the CRTC_VBLANK_SAVE bit */
 	WREG32(RADEON_CRTC2_STATUS, RADEON_CRTC2_VBLANK_SAVE_CLEAR);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(RADEON_CRTC2_STATUS);
 		if (tmp & RADEON_CRTC2_VBLANK_SAVE) {
 			return;
 		}
 		DRM_UDELAY(1);
 	}
 }
 
 int r100_rbbm_fifo_wait_for_entry(struct radeon_device *rdev, unsigned n)
 {
 	unsigned i;
 	uint32_t tmp;
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_FIFOCNT_MASK;
 		if (tmp >= n) {
 			return 0;
 		}
 		DRM_UDELAY(1);
 	}
 	return -1;
 }
 
 int r100_gui_wait_for_idle(struct radeon_device *rdev)
 {
 	unsigned i;
 	uint32_t tmp;
 
 	if (r100_rbbm_fifo_wait_for_entry(rdev, 64)) {
 		printk(KERN_WARNING "radeon: wait for empty RBBM fifo failed !"
 		       " Bad things might happen.\n");
 	}
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(RADEON_RBBM_STATUS);
 		if (!(tmp & (1 << 31))) {
 			return 0;
 		}
 		DRM_UDELAY(1);
 	}
 	return -1;
 }
 
 int r100_mc_wait_for_idle(struct radeon_device *rdev)
 {
 	unsigned i;
 	uint32_t tmp;
 
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		/* read MC_STATUS */
 		tmp = RREG32(0x0150);
 		if (tmp & (1 << 2)) {
 			return 0;
 		}
 		DRM_UDELAY(1);
 	}
 	return -1;
 }
 
 void r100_gpu_init(struct radeon_device *rdev)
 {
 	/* TODO: anything to do here ? pipes ? */
 	r100_hdp_reset(rdev);
 }
 
 void r100_hdp_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 
-    dbgprintf("%s\n",__FUNCTION__);
+    ENTER();
 
 	tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL;
 	tmp |= (7 << 28);
 	WREG32(RADEON_HOST_PATH_CNTL, tmp | RADEON_HDP_SOFT_RESET | RADEON_HDP_READ_BUFFER_INVALIDATE);
 	(void)RREG32(RADEON_HOST_PATH_CNTL);
 	udelay(200);
 	WREG32(RADEON_RBBM_SOFT_RESET, 0);
 	WREG32(RADEON_HOST_PATH_CNTL, tmp);
 	(void)RREG32(RADEON_HOST_PATH_CNTL);
 }
 
 int r100_rb2d_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 	int i;
 
-    dbgprintf("%s\n",__FUNCTION__);
+       ENTER();
 
 	WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2);
 	(void)RREG32(RADEON_RBBM_SOFT_RESET);
 	udelay(200);
 	WREG32(RADEON_RBBM_SOFT_RESET, 0);
 	/* Wait to prevent race in RBBM_STATUS */
 	mdelay(1);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(RADEON_RBBM_STATUS);
 		if (!(tmp & (1 << 26))) {
 			DRM_INFO("RB2D reset succeed (RBBM_STATUS=0x%08X)\n",
 				 tmp);
 			return 0;
 		}
 		DRM_UDELAY(1);
 	}
 	tmp = RREG32(RADEON_RBBM_STATUS);
 	DRM_ERROR("Failed to reset RB2D (RBBM_STATUS=0x%08X)!\n", tmp);
 	return -1;
 }
 
 int r100_gpu_reset(struct radeon_device *rdev)
 {
 	uint32_t status;
 
 	/* reset order likely matters */
 	status = RREG32(RADEON_RBBM_STATUS);
 	/* reset HDP */
 	r100_hdp_reset(rdev);
 	/* reset rb2d */
 	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
 		r100_rb2d_reset(rdev);
 	}
 	/* TODO: reset 3D engine */
 	/* reset CP */
 	status = RREG32(RADEON_RBBM_STATUS);
 	if (status & (1 << 16)) {
 		r100_cp_reset(rdev);
 	}
 	/* Check if GPU is idle */
 	status = RREG32(RADEON_RBBM_STATUS);
 	if (status & (1 << 31)) {
 		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
 		return -1;
 	}
 	DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
 	return 0;
 }
 
 
 /*
  * VRAM info
  */
 static void r100_vram_get_type(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 
 	rdev->mc.vram_is_ddr = false;
 	if (rdev->flags & RADEON_IS_IGP)
 		rdev->mc.vram_is_ddr = true;
 	else if (RREG32(RADEON_MEM_SDRAM_MODE_REG) & RADEON_MEM_CFG_TYPE_DDR)
 		rdev->mc.vram_is_ddr = true;
 	if ((rdev->family == CHIP_RV100) ||
 	    (rdev->family == CHIP_RS100) ||
 	    (rdev->family == CHIP_RS200)) {
 		tmp = RREG32(RADEON_MEM_CNTL);
 		if (tmp & RV100_HALF_MODE) {
 			rdev->mc.vram_width = 32;
 		} else {
 			rdev->mc.vram_width = 64;
 		}
 		if (rdev->flags & RADEON_SINGLE_CRTC) {
 			rdev->mc.vram_width /= 4;
 			rdev->mc.vram_is_ddr = true;
 		}
 	} else if (rdev->family <= CHIP_RV280) {
 		tmp = RREG32(RADEON_MEM_CNTL);
 		if (tmp & RADEON_MEM_NUM_CHANNELS_MASK) {
 			rdev->mc.vram_width = 128;
 		} else {
 			rdev->mc.vram_width = 64;
 		}
 	} else {
 		/* newer IGPs */
 		rdev->mc.vram_width = 128;
 	}
 }
 
-void r100_vram_info(struct radeon_device *rdev)
+static u32 r100_get_accessible_vram(struct radeon_device *rdev)
+{
+	u32 aper_size;
+	u8 byte;
+
+	aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
+
+	/* Set HDP_APER_CNTL only on cards that are known not to be broken,
+	 * that is, has the 2nd generation multifunction PCI interface
+	 */
+	if (rdev->family == CHIP_RV280 ||
+	    rdev->family >= CHIP_RV350) {
+		WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL,
+		       ~RADEON_HDP_APER_CNTL);
-{
+		DRM_INFO("Generation 2 PCI interface, using max accessible memory\n");
+		return aper_size * 2;
+	}
+
+	/* Older cards have all sorts of funny issues to deal with. First
+	 * check if it's a multifunction card by reading the PCI config
+	 * header type... Limit those to one aperture size
+	 */
+//   pci_read_config_byte(rdev->pdev, 0xe, &byte);
+//   if (byte & 0x80) {
+//       DRM_INFO("Generation 1 PCI interface in multifunction mode\n");
+//       DRM_INFO("Limiting VRAM to one aperture\n");
+//       return aper_size;
+//   }
+
+	/* Single function older card. We read HDP_APER_CNTL to see how the BIOS
+	 * has set it up. We don't write this as it's broken on some ASICs but
+	 * we expect the BIOS to have done the right thing (might be too optimistic...)
+	 */
+	if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL)
+		return aper_size * 2;
+	return aper_size;
+}
+
+void r100_vram_init_sizes(struct radeon_device *rdev)
+{
+	u64 config_aper_size;
+	u32 accessible;
+
-	r100_vram_get_type(rdev);
+	config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
 
 	if (rdev->flags & RADEON_IS_IGP) {
 		uint32_t tom;
 		/* read NB_TOM to get the amount of ram stolen for the GPU */
 		tom = RREG32(RADEON_NB_TOM);
-		rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+		rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
+		/* for IGPs we need to keep VRAM where it was put by the BIOS */
+		rdev->mc.vram_location = (tom & 0xffff) << 16;
-		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+		WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
+		rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	} else {
-		rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+		rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 		/* Some production boards of m6 will report 0
 		 * if it's 8 MB
 		 */
-		if (rdev->mc.vram_size == 0) {
-			rdev->mc.vram_size = 8192 * 1024;
-			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size);
+		if (rdev->mc.real_vram_size == 0) {
+			rdev->mc.real_vram_size = 8192 * 1024;
+			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
 		}
+		/* let driver place VRAM */
+		rdev->mc.vram_location = 0xFFFFFFFFUL;
+		 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+		  * Novell bug 204882 + along with lots of ubuntu ones */
+		if (config_aper_size > rdev->mc.real_vram_size)
+			rdev->mc.mc_vram_size = config_aper_size;
+		else
+			rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 	}
+
+	/* work out accessible VRAM */
+	accessible = r100_get_accessible_vram(rdev);
 
 	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+	if (accessible > rdev->mc.aper_size)
+		accessible = rdev->mc.aper_size;
+
+	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+		rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+		rdev->mc.real_vram_size = rdev->mc.aper_size;
+}
+
+void r100_vga_set_state(struct radeon_device *rdev, bool state)
+{
+	uint32_t temp;
+
+	temp = RREG32(RADEON_CONFIG_CNTL);
+	if (state == false) {
+		temp &= ~(1<<8);
+		temp |= (1<<9);
+	} else {
+		temp &= ~(1<<9);
+	}
+	WREG32(RADEON_CONFIG_CNTL, temp);
+}
+
+void r100_vram_info(struct radeon_device *rdev)
+{
+	r100_vram_get_type(rdev);
+
-	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	r100_vram_init_sizes(rdev);
 }
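As a worked example of the NB_TOM decoding above (the register values are illustrative): the register holds the top of the stolen-memory range in bits 31:16 and the bottom in bits 15:0, both in 64 KB units, so

	/* tom = 0x7fff7c00: top 0x7fff, bottom 0x7c00 (64 KB units) */
	u32 tom  = 0x7fff7c00;
	u32 size = ((tom >> 16) - (tom & 0xffff) + 1) << 16;	/* 0x400 << 16 = 64 MB */
	u32 base = (tom & 0xffff) << 16;	/* VRAM stays where the BIOS placed it */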
 
 
 /*
  * Indirect registers accessor
  */
 void r100_pll_errata_after_index(struct radeon_device *rdev)
 {
 	if (!(rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS)) {
 		return;
 	}
 	(void)RREG32(RADEON_CLOCK_CNTL_DATA);
 	(void)RREG32(RADEON_CRTC_GEN_CNTL);
 }
 
 static void r100_pll_errata_after_data(struct radeon_device *rdev)
 {
 	/* This workaround is necessary on RV100, RS100 and RS200 chips
 	 * or the chip could hang on a subsequent access
 	 */
 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
 		udelay(5000);
 	}
 
 	/* This function is required to work around a hardware bug in some (all?)
 	 * revisions of the R300.  This workaround should be called after every
 	 * CLOCK_CNTL_INDEX register access.  If not, register reads afterward
 	 * may not be correct.
 	 */
 	if (rdev->pll_errata & CHIP_ERRATA_R300_CG) {
 		uint32_t save, tmp;
 
 		save = RREG32(RADEON_CLOCK_CNTL_INDEX);
 		tmp = save & ~(0x3f | RADEON_PLL_WR_EN);
 		WREG32(RADEON_CLOCK_CNTL_INDEX, tmp);
 		tmp = RREG32(RADEON_CLOCK_CNTL_DATA);
 		WREG32(RADEON_CLOCK_CNTL_INDEX, save);
 	}
 }
 
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
 {
 	uint32_t data;
 
 	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
 	r100_pll_errata_after_index(rdev);
 	data = RREG32(RADEON_CLOCK_CNTL_DATA);
 	r100_pll_errata_after_data(rdev);
 	return data;
 }
 
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
 	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
 	r100_pll_errata_after_index(rdev);
 	WREG32(RADEON_CLOCK_CNTL_DATA, v);
 	r100_pll_errata_after_data(rdev);
 }
1351
}
1923
}
1352
 
-
 
1353
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
-
 
1354
{
-
 
1355
	if (reg < 0x10000)
-
 
1356
		return readl(((void __iomem *)rdev->rmmio) + reg);
-
 
1357
	else {
-
 
1358
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-
 
1359
		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
-
 
1360
	}
-
 
1361
}
-
 
1362
 
-
 
1363
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-
 
1364
{
-
 
1365
	if (reg < 0x10000)
-
 
1366
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
-
 
1367
	else {
-
 
1368
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-
 
1369
		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
-
 
1370
	}
-
 
1371
}
-
 
1372
 
1924
 
 int r100_init(struct radeon_device *rdev)
 {
+	if (ASIC_IS_RN50(rdev)) {
+		rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm;
+		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm);
+	} else if (rdev->family < CHIP_R200) {
+		rdev->config.r100.reg_safe_bm = r100_reg_safe_bm;
+		rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm);
+	} else {
+		return r200_init(rdev);
+	}
 	return 0;
 }
1377
 
1938
 
1378
/*
1939
/*
1379
 * Debugfs info
1940
 * Debugfs info
1380
 */
1941
 */
1381
#if defined(CONFIG_DEBUG_FS)
1942
#if defined(CONFIG_DEBUG_FS)
1382
static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
1943
static int r100_debugfs_rbbm_info(struct seq_file *m, void *data)
1383
{
1944
{
1384
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1945
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1385
	struct drm_device *dev = node->minor->dev;
1946
	struct drm_device *dev = node->minor->dev;
1386
	struct radeon_device *rdev = dev->dev_private;
1947
	struct radeon_device *rdev = dev->dev_private;
1387
	uint32_t reg, value;
1948
	uint32_t reg, value;
1388
	unsigned i;
1949
	unsigned i;
1389
 
1950
 
1390
	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
1951
	seq_printf(m, "RBBM_STATUS 0x%08x\n", RREG32(RADEON_RBBM_STATUS));
1391
	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
1952
	seq_printf(m, "RBBM_CMDFIFO_STAT 0x%08x\n", RREG32(0xE7C));
1392
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1953
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1393
	for (i = 0; i < 64; i++) {
1954
	for (i = 0; i < 64; i++) {
1394
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
1955
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i | 0x100);
1395
		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
1956
		reg = (RREG32(RADEON_RBBM_CMDFIFO_DATA) - 1) >> 2;
1396
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
1957
		WREG32(RADEON_RBBM_CMDFIFO_ADDR, i);
1397
		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
1958
		value = RREG32(RADEON_RBBM_CMDFIFO_DATA);
1398
		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
1959
		seq_printf(m, "[0x%03X] 0x%04X=0x%08X\n", i, reg, value);
1399
	}
1960
	}
1400
	return 0;
1961
	return 0;
1401
}
1962
}
1402
 
1963
 
1403
static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
1964
static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
1404
{
1965
{
1405
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1966
	struct drm_info_node *node = (struct drm_info_node *) m->private;
1406
	struct drm_device *dev = node->minor->dev;
1967
	struct drm_device *dev = node->minor->dev;
1407
	struct radeon_device *rdev = dev->dev_private;
1968
	struct radeon_device *rdev = dev->dev_private;
1408
	uint32_t rdp, wdp;
1969
	uint32_t rdp, wdp;
1409
	unsigned count, i, j;
1970
	unsigned count, i, j;
1410
 
1971
 
1411
	radeon_ring_free_size(rdev);
1972
	radeon_ring_free_size(rdev);
1412
	rdp = RREG32(RADEON_CP_RB_RPTR);
1973
	rdp = RREG32(RADEON_CP_RB_RPTR);
1413
	wdp = RREG32(RADEON_CP_RB_WPTR);
1974
	wdp = RREG32(RADEON_CP_RB_WPTR);
1414
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1975
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
1415
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1976
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
1416
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
1977
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
1417
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
1978
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
1418
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1979
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
1419
	seq_printf(m, "%u dwords in ring\n", count);
1980
	seq_printf(m, "%u dwords in ring\n", count);
1420
	for (j = 0; j <= count; j++) {
1981
	for (j = 0; j <= count; j++) {
1421
		i = (rdp + j) & rdev->cp.ptr_mask;
1982
		i = (rdp + j) & rdev->cp.ptr_mask;
1422
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
1983
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
1423
	}
1984
	}
1424
	return 0;
1985
	return 0;
1425
}
1986
}
1426
 
1987
 
1427
 
1988
 
1428
static int r100_debugfs_cp_csq_fifo(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t csq_stat, csq2_stat, tmp;
	unsigned r_rptr, r_wptr, ib1_rptr, ib1_wptr, ib2_rptr, ib2_wptr;
	unsigned i;

	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
	seq_printf(m, "CP_CSQ_MODE 0x%08x\n", RREG32(RADEON_CP_CSQ_MODE));
	csq_stat = RREG32(RADEON_CP_CSQ_STAT);
	csq2_stat = RREG32(RADEON_CP_CSQ2_STAT);
	r_rptr = (csq_stat >> 0) & 0x3ff;
	r_wptr = (csq_stat >> 10) & 0x3ff;
	ib1_rptr = (csq_stat >> 20) & 0x3ff;
	ib1_wptr = (csq2_stat >> 0) & 0x3ff;
	ib2_rptr = (csq2_stat >> 10) & 0x3ff;
	ib2_wptr = (csq2_stat >> 20) & 0x3ff;
	seq_printf(m, "CP_CSQ_STAT 0x%08x\n", csq_stat);
	seq_printf(m, "CP_CSQ2_STAT 0x%08x\n", csq2_stat);
	seq_printf(m, "Ring rptr %u\n", r_rptr);
	seq_printf(m, "Ring wptr %u\n", r_wptr);
	seq_printf(m, "Indirect1 rptr %u\n", ib1_rptr);
	seq_printf(m, "Indirect1 wptr %u\n", ib1_wptr);
	seq_printf(m, "Indirect2 rptr %u\n", ib2_rptr);
	seq_printf(m, "Indirect2 wptr %u\n", ib2_wptr);
	/* FIXME: 0, 128, 640 depend on the FIFO setup, see cp_init_kms:
	 * 128 = indirect1_start * 8 and 640 = indirect2_start * 8 */
	seq_printf(m, "Ring fifo:\n");
	for (i = 0; i < 256; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "rfifo[%04d]=0x%08X\n", i, tmp);
	}
	seq_printf(m, "Indirect1 fifo:\n");
	for (i = 256; i <= 512; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "ib1fifo[%04d]=0x%08X\n", i, tmp);
	}
	seq_printf(m, "Indirect2 fifo:\n");
	for (i = 640; i < ib1_wptr; i++) {
		WREG32(RADEON_CP_CSQ_ADDR, i << 2);
		tmp = RREG32(RADEON_CP_CSQ_DATA);
		seq_printf(m, "ib2fifo[%04d]=0x%08X\n", i, tmp);
	}
	return 0;
}

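/* debugfs: dump the memory controller setup - VRAM size and location,
 * AGP aperture and base, and the AIC (PCI GART) address window.
 */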
static int r100_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(RADEON_CONFIG_MEMSIZE);
	seq_printf(m, "CONFIG_MEMSIZE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_FB_LOCATION);
	seq_printf(m, "MC_FB_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_BUS_CNTL);
	seq_printf(m, "BUS_CNTL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_MC_AGP_LOCATION);
	seq_printf(m, "MC_AGP_LOCATION 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AGP_BASE);
	seq_printf(m, "AGP_BASE 0x%08x\n", tmp);
	tmp = RREG32(RADEON_HOST_PATH_CNTL);
	seq_printf(m, "HOST_PATH_CNTL 0x%08x\n", tmp);
	tmp = RREG32(0x01D0);
	seq_printf(m, "AIC_CTRL 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_LO_ADDR);
	seq_printf(m, "AIC_LO_ADDR 0x%08x\n", tmp);
	tmp = RREG32(RADEON_AIC_HI_ADDR);
	seq_printf(m, "AIC_HI_ADDR 0x%08x\n", tmp);
	tmp = RREG32(0x01E4);
	seq_printf(m, "AIC_TLB_ADDR 0x%08x\n", tmp);
	return 0;
}

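/* Tables mapping debugfs file names to the dump callbacks above. */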
static struct drm_info_list r100_debugfs_rbbm_list[] = {
	{"r100_rbbm_info", r100_debugfs_rbbm_info, 0, NULL},
};

static struct drm_info_list r100_debugfs_cp_list[] = {
	{"r100_cp_ring_info", r100_debugfs_cp_ring_info, 0, NULL},
	{"r100_cp_csq_fifo", r100_debugfs_cp_csq_fifo, 0, NULL},
};

static struct drm_info_list r100_debugfs_mc_info_list[] = {
	{"r100_mc_info", r100_debugfs_mc_info, 0, NULL},
};
#endif

int r100_debugfs_rbbm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_rbbm_list, 1);
#else
	return 0;
#endif
}

int r100_debugfs_cp_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_cp_list, 2);
#else
	return 0;
#endif
}

int r100_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r100_debugfs_mc_info_list, 1);
#else
	return 0;
#endif
}

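/* Program one of the surface registers: pitch and tiling flags go into
 * SURFACE0_INFO + surf_index, the object range into the LOWER/UPPER
 * bound registers. The tile flag encoding is family specific (r100/rs200,
 * r200/rv280 and r300+ use different bits), hence the chip checks below.
 */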
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	int surf_index = reg * 16;
	int flags = 0;

	/* r100/r200 divide by 16 */
	if (rdev->family < CHIP_R300)
		flags = pitch / 16;
	else
		flags = pitch / 8;

	if (rdev->family <= CHIP_RS200) {
		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
			flags |= RADEON_SURF_TILE_COLOR_BOTH;
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= RADEON_SURF_TILE_COLOR_MACRO;
	} else if (rdev->family <= CHIP_RV280) {
		if (tiling_flags & (RADEON_TILING_MACRO))
			flags |= R200_SURF_TILE_COLOR_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R200_SURF_TILE_COLOR_MICRO;
	} else {
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= R300_SURF_TILE_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R300_SURF_TILE_MICRO;
	}

	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;

	DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
	return 0;
}

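/* Disable a surface register slot by clearing its INFO register. */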
void r100_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	int surf_index = reg * 16;
	WREG32(RADEON_SURFACE0_INFO + surf_index, 0);
}

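/* Recompute the display watermarks for the enabled CRTCs. All of the
 * arithmetic below is done in 20.12 fixed point (fixed20_12): the memory
 * and engine clocks are scaled down by 100, the mode pixel clocks by
 * 1000, and the peak display bandwidth is checked against the available
 * memory bandwidth before the per-CRTC watermarks are derived.
 */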
void r100_bandwidth_update(struct radeon_device *rdev)
{
	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
	fixed20_12 memtcas_ff[8] = {
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init(0),
	};
	fixed20_12 memtcas_rs480_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init_half(3),
	};
	fixed20_12 memtcas2_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
	};
	fixed20_12 memtrbs[8] = {
		fixed_init(1),
		fixed_init_half(1),
		fixed_init(2),
		fixed_init_half(2),
		fixed_init(3),
		fixed_init_half(3),
		fixed_init(4),
		fixed_init_half(4)
	};
	fixed20_12 memtrbs_r4xx[8] = {
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
		fixed_init(8),
		fixed_init(9),
		fixed_init(10),
		fixed_init(11)
	};
	fixed20_12 min_mem_eff;
	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
	fixed20_12 cur_latency_mclk, cur_latency_sclk;
	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
		disp_drain_rate2, read_return_rate;
	fixed20_12 time_disp1_drop_priority;
	int c;
	int cur_size = 16;       /* in octawords */
	int critical_point = 0, critical_point2;
/* 	uint32_t read_return_rate, time_disp1_drop_priority; */
	int stop_req, max_stop_req;
	struct drm_display_mode *mode1 = NULL;
	struct drm_display_mode *mode2 = NULL;
	uint32_t pixel_bytes1 = 0;
	uint32_t pixel_bytes2 = 0;

	if (rdev->mode_info.crtcs[0]->base.enabled) {
		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
	}
	if (rdev->mode_info.crtcs[1]->base.enabled) {
		mode2 = &rdev->mode_info.crtcs[1]->base.mode;
		pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
	}

	min_mem_eff.full = rfixed_const_8(0);
	/* get modes */
	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
		/* check crtc enables */
		if (mode2)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
	}

	/*
	 * determine if there is enough bw for the current mode
	 */
	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
	temp_ff.full = rfixed_const(100);
	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);

	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = rfixed_const(temp);
	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);

	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = rfixed_const(1000);
		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
		pix_clk.full = rfixed_div(pix_clk, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes1);
		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = rfixed_const(1000);
		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes2);
		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
	}

	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
	}

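	/* Decode the DRAM timing parameters (tRCD, tRP, tRAS and the CAS
	 * latency) used in the latency model below; the register bit layout
	 * differs per chip family, hence the cascade of family checks.
	 */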
	/*  Get values from the EXT_MEM_CNTL register...converting its contents. */
	temp = RREG32(RADEON_MEM_TIMING_CNTL);
	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
		mem_trcd = ((temp >> 2) & 0x3) + 1;
		mem_trp  = ((temp & 0x3)) + 1;
		mem_tras = ((temp & 0x70) >> 4) + 1;
	} else if (rdev->family == CHIP_R300 ||
		   rdev->family == CHIP_R350) { /* r300, r350 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 11) & 0xf) + 4;
	} else if (rdev->family == CHIP_RV350 ||
		   rdev->family <= CHIP_RV380) {
		/* rv3x0 */
		mem_trcd = (temp & 0x7) + 3;
		mem_trp = ((temp >> 8) & 0x7) + 3;
		mem_tras = ((temp >> 11) & 0xf) + 6;
	} else if (rdev->family == CHIP_R420 ||
		   rdev->family == CHIP_R423 ||
		   rdev->family == CHIP_RV410) {
		/* r4xx */
		mem_trcd = (temp & 0xf) + 3;
		if (mem_trcd > 15)
			mem_trcd = 15;
		mem_trp = ((temp >> 8) & 0xf) + 3;
		if (mem_trp > 15)
			mem_trp = 15;
		mem_tras = ((temp >> 12) & 0x1f) + 6;
		if (mem_tras > 31)
			mem_tras = 31;
	} else { /* RV200, R200 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 12) & 0xf) + 4;
	}
	/* convert to FF */
	trcd_ff.full = rfixed_const(mem_trcd);
	trp_ff.full = rfixed_const(mem_trp);
	tras_ff.full = rfixed_const(mem_tras);

	/* Get values from the MEM_SDRAM_MODE_REG register...converting its contents. */
	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
	data = (temp & (7 << 20)) >> 20;
	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_RS480) /* don't think rs400 */
			tcas_ff = memtcas_rs480_ff[data];
		else
			tcas_ff = memtcas_ff[data];
	} else
		tcas_ff = memtcas2_ff[data];

	if (rdev->family == CHIP_RS400 ||
	    rdev->family == CHIP_RS480) {
		/* extra cas latency stored in bits 23-25 0-4 clocks */
		data = (temp >> 23) & 0x7;
		if (data < 5)
			tcas_ff.full += rfixed_const(data);
	}

	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
		/* on the R300, Tcas is included in Trbs. */
		temp = RREG32(RADEON_MEM_CNTL);
		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
		if (data == 1) {
			if (R300_MEM_USE_CD_CH_ONLY & temp) {
				temp = RREG32(R300_MC_IND_INDEX);
				temp &= ~R300_MC_IND_ADDR_MASK;
				temp |= R300_MC_READ_CNTL_CD_mcind;
				WREG32(R300_MC_IND_INDEX, temp);
				temp = RREG32(R300_MC_IND_DATA);
				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
			} else {
				temp = RREG32(R300_MC_READ_CNTL_AB);
				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
			}
		} else {
			temp = RREG32(R300_MC_READ_CNTL_AB);
			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
		}
		if (rdev->family == CHIP_RV410 ||
		    rdev->family == CHIP_R420 ||
		    rdev->family == CHIP_R423)
			trbs_ff = memtrbs_r4xx[data];
		else
			trbs_ff = memtrbs[data];
		tcas_ff.full += trbs_ff.full;
	}

	sclk_eff_ff.full = sclk_ff.full;

	if (rdev->flags & RADEON_IS_AGP) {
		fixed20_12 agpmode_ff;
		agpmode_ff.full = rfixed_const(radeon_agpmode);
		temp_ff.full = rfixed_const_666(16);
		sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
	}
	/* TODO PCIE lanes may affect this - agpmode == 16?? */

	if (ASIC_IS_R300(rdev)) {
		sclk_delay_ff.full = rfixed_const(250);
	} else {
		if ((rdev->family == CHIP_RV100) ||
		    rdev->flags & RADEON_IS_IGP) {
			if (rdev->mc.vram_is_ddr)
				sclk_delay_ff.full = rfixed_const(41);
			else
				sclk_delay_ff.full = rfixed_const(33);
		} else {
			if (rdev->mc.vram_width == 128)
				sclk_delay_ff.full = rfixed_const(57);
			else
				sclk_delay_ff.full = rfixed_const(41);
		}
	}

	mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);

	if (rdev->mc.vram_is_ddr) {
		if (rdev->mc.vram_width == 32) {
			k1.full = rfixed_const(40);
			c  = 3;
		} else {
			k1.full = rfixed_const(20);
			c  = 1;
		}
	} else {
		k1.full = rfixed_const(40);
		c  = 3;
	}

	temp_ff.full = rfixed_const(2);
	mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
	temp_ff.full = rfixed_const(c);
	mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
	temp_ff.full = rfixed_const(4);
	mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
	mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
	mc_latency_mclk.full += k1.full;

	mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
	mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);

	/*
	  HW cursor time assuming worst case of full size colour cursor.
	*/
	temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
	temp_ff.full += trcd_ff.full;
	if (temp_ff.full < tras_ff.full)
		temp_ff.full = tras_ff.full;
	cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);

	temp_ff.full = rfixed_const(cur_size);
	cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
	/*
	  Find the total latency for the display data.
	*/
	disp_latency_overhead.full = rfixed_const(80);
	disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;

	if (mc_latency_mclk.full > mc_latency_sclk.full)
		disp_latency.full = mc_latency_mclk.full;
	else
		disp_latency.full = mc_latency_sclk.full;

	/* setup Max GRPH_STOP_REQ default value */
	if (ASIC_IS_RV100(rdev))
		max_stop_req = 0x5c;
	else
		max_stop_req = 0x7c;

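	/* Per-CRTC watermark programming: STOP_REQ is derived from the
	 * visible width and pixel size, and the critical point from the
	 * buffer drain rate multiplied by the worst-case memory latency
	 * computed above.
	 */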
	if (mode1) {
		/*  CRTC1
		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
		*/
		stop_req = mode1->hdisplay * pixel_bytes1 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = rfixed_const((16/pixel_bytes1));
		disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);

		/*
		  Find the critical point of the display buffer.
		*/
		crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
		crit_point_ff.full += rfixed_const_half(0);

		critical_point = rfixed_trunc(crit_point_ff);

		if (rdev->disp_priority == 2) {
			critical_point = 0;
		}

		/*
		  The critical point should never be above max_stop_req-4.  Setting
		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
		*/
		if (max_stop_req - critical_point < 4)
			critical_point = 0;

		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
			critical_point = 0x10;
		}

		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		temp &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		temp |= RADEON_GRPH_BUFFER_SIZE;
		temp &= ~(RADEON_GRPH_CRITICAL_CNTL   |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);
		/*
		  Write the result into the register.
		*/
		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						       (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

#if 0
		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
			/* attempt to program RS400 disp regs correctly ??? */
			temp = RREG32(RS400_DISP1_REG_CNTL);
			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DMIF_MEM_CNTL1);
			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
		}
#endif

		DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
			  /* 	  (unsigned int)info->SavedReg->grph_buffer_cntl, */
			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
	}

	if (mode2) {
		u32 grph2_cntl;
		stop_req = mode2->hdisplay * pixel_bytes2 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = rfixed_const((16/pixel_bytes2));
		disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);

		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL   |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);

		if ((rdev->family == CHIP_RS100) ||
		    (rdev->family == CHIP_RS200))
			critical_point2 = 0;
		else {
			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
			temp_ff.full = rfixed_const(temp);
			temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
			if (sclk_ff.full < temp_ff.full)
				temp_ff.full = sclk_ff.full;

			read_return_rate.full = temp_ff.full;

			if (mode1) {
				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
				time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
			} else {
				time_disp1_drop_priority.full = 0;
			}
			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
			crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
			crit_point_ff.full += rfixed_const_half(0);

			critical_point2 = rfixed_trunc(crit_point_ff);

			if (rdev->disp_priority == 2) {
				critical_point2 = 0;
			}

			if (max_stop_req - critical_point2 < 4)
				critical_point2 = 0;

		}

		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
			/* some R300 cards have problem with this set to 0 */
			critical_point2 = 0x10;
		}

		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
#if 0
			/* attempt to program RS400 disp2 regs correctly ??? */
			temp = RREG32(RS400_DISP2_REQ_CNTL1);
			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DISP2_REQ_CNTL2);
			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
#endif
			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
			WREG32(RS400_DMIF_MEM_CNTL1,  0x29CA71DC);
			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
		}

		DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
	}
}

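/* Quiesce the parts of the chip that hit memory (CP, cursors, overlay,
 * CRTC requests) and save the CRTC registers, so the memory controller
 * can be reprogrammed; r100_mc_resume() undoes this.
 */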
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shutdown CP: we shouldn't need to do that, but better safe than
	 * sorry.
	 */
	rdev->cp.ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);

	/* Save a few CRTC registers */
	save->GENMO_WT = RREG32(R_0003C0_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}

	/* Disable VGA aperture access */
	WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
					S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
			S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
						S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
			S_0003F8_CRTC2_DISPLAY_DIS(1) |
			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}

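/* Counterpart of r100_mc_stop(): point both CRTCs at the (possibly
 * moved) VRAM base and restore the saved CRTC registers.
 */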
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
				rdev->mc.vram_location);
	}
	/* Restore CRTC registers */
	WREG32(R_0003C0_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}

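/* Local helper (normally provided by the DRM core): returns the smallest
 * order such that (1 << order) >= size.
 */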
int drm_order(unsigned long size)
{
    int order;
    unsigned long tmp;

    for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

    if (size & (size - 1))
        ++order;

    return order;
}