/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include <linux/firmware.h>
//#include <linux/platform_device.h>
#include "drmP.h"
31
#include "radeon.h"
32
#include "radeon_drm.h"
33
#include "rv770d.h"
34
#include "atom.h"
35
#include "avivod.h"
36
 
37
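/* CP microcode image sizes, in 32-bit words; they bound the upload loops below */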
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360

static void rv770_gpu_init(struct radeon_device *rdev);
void rv770_fini(struct radeon_device *rdev);


/*
 * GART
 */
int rv770_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
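	/* point VM context 0 at the GART aperture and its page table (addresses in 4KB page units) */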
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
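	/* leave VM contexts 1-6 disabled; only context 0 backs the GART */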
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}

void rv770_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i, r;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
//       radeon_object_kunmap(rdev->gart.table.vram.robj);
//       radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}

void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
	rv770_pcie_gart_disable(rdev);
//   radeon_gart_table_vram_free(rdev);
    radeon_gart_fini(rdev);
}


void rv770_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
	WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}

static void rv770_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

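	/* halt MC clients (display) and save their state while the apertures are reprogrammed */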
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	/* Lockout access through VGA aperture*/
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
			rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
			rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
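	/* MC_VM_FB_LOCATION packs the VRAM range in 16MB units: end in the high half, start in the low half */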
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}


/*
 * CP.
 */
void r700_cp_stop(struct radeon_device *rdev)
{
	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
}

#if 0
static int rv770_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r700_cp_stop(rdev);
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

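	/* upload the PFP microcode, converting each big-endian dword to CPU byte order */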
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

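	/* upload the ME (PM4) microcode the same way */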
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);