Subversion Repositories Kolibri OS

Rev 1123 | Rev 1125
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
//#include 

#include 
//#include 
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "atom.h"

#include 

int radeon_dynclks = -1;
int radeon_r4xx_atom = 0;
int radeon_agpmode   = -1;
int radeon_gart_size = 512; /* default gart size */
int radeon_benchmarking = 0;
int radeon_connector_table = 0;


/*
 * Clear GPU surface registers.
 */
static void radeon_surface_init(struct radeon_device *rdev)
{
    dbgprintf("%s\n",__FUNCTION__);

    /* FIXME: check this out */
    if (rdev->family < CHIP_R600) {
        int i;

        for (i = 0; i < 8; i++) {
            WREG32(RADEON_SURFACE0_INFO +
                   i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
                   0);
        }
    }
}

/*
 * GPU scratch registers helpers function.
 */
static void radeon_scratch_init(struct radeon_device *rdev)
{
    int i;

    /* FIXME: check this out */
    if (rdev->family < CHIP_R300) {
        rdev->scratch.num_reg = 5;
    } else {
        rdev->scratch.num_reg = 7;
    }
    for (i = 0; i < rdev->scratch.num_reg; i++) {
        rdev->scratch.free[i] = true;
        rdev->scratch.reg[i] = RADEON_SCRATCH_REG0 + (i * 4);
    }
}

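/* Hand out one of the scratch registers set up above: on success the
 * register's MMIO offset is stored in *reg and 0 is returned, otherwise
 * -EINVAL when every slot is taken; radeon_scratch_free() below returns a
 * register to the pool. */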
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

/*
 * MC common functions
 */
int radeon_mc_setup(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* Some chips have an "issue" with the memory controller, the
	 * location must be aligned to the size. We just align it down,
	 * too bad if we walk over the top of system memory, we don't
	 * use DMA without a remapped anyway.
	 * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
	 */
	/* FGLRX seems to setup like this, VRAM a 0, then GART.
	 */
/*
	 * Note: from R6xx the address space is 40bits but here we only
	 * use 32bits (still have to see a card which would exhaust 4G
	 * address space).
	 */
	if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
		/* vram location was already setup try to put gtt after
		 * if it fits */
		tmp = rdev->mc.vram_location + rdev->mc.vram_size;
		tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
		if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
			rdev->mc.gtt_location = tmp;
		} else {
			if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
				printk(KERN_ERR "[drm] GTT too big to fit "
				       "before or after vram location.\n");
				return -EINVAL;
			}
			rdev->mc.gtt_location = 0;
		}
	} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
		/* gtt location was already setup try to put vram before
		 * if it fits */
		if (rdev->mc.vram_size < rdev->mc.gtt_location) {
			rdev->mc.vram_location = 0;
		} else {
			tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
			tmp += (rdev->mc.vram_size - 1);
			tmp &= ~(rdev->mc.vram_size - 1);
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) {
				rdev->mc.vram_location = tmp;
			} else {
				printk(KERN_ERR "[drm] vram too big to fit "
				       "before or after GTT location.\n");
				return -EINVAL;
			}
		}
	} else {
		rdev->mc.vram_location = 0;
		rdev->mc.gtt_location = rdev->mc.vram_size;
	}
	DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20);
	DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
		 rdev->mc.vram_location,
		 rdev->mc.vram_location + rdev->mc.vram_size - 1);
	DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20);
	DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
		 rdev->mc.gtt_location,
		 rdev->mc.gtt_location + rdev->mc.gtt_size - 1);
	return 0;
}


/*
 * GPU helpers function.
 */
static bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

    dbgprintf("%s\n",__FUNCTION__);

	/* first check CRTCs */
	if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}


/*
 * Registers accessors functions.
 */
uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
{
    DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
    BUG_ON(1);
    return 0;
}

void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
    DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
          reg, v);
    BUG_ON(1);
}

void radeon_register_accessor_init(struct radeon_device *rdev)
{

    dbgprintf("%s\n",__FUNCTION__);

    rdev->mm_rreg = &r100_mm_rreg;
    rdev->mm_wreg = &r100_mm_wreg;
    rdev->mc_rreg = &radeon_invalid_rreg;
    rdev->mc_wreg = &radeon_invalid_wreg;
    rdev->pll_rreg = &radeon_invalid_rreg;
    rdev->pll_wreg = &radeon_invalid_wreg;
    rdev->pcie_rreg = &radeon_invalid_rreg;
    rdev->pcie_wreg = &radeon_invalid_wreg;
    rdev->pciep_rreg = &radeon_invalid_rreg;
    rdev->pciep_wreg = &radeon_invalid_wreg;

    /* Don't change order as we are overridding accessor. */
    if (rdev->family < CHIP_RV515) {
//        rdev->pcie_rreg = &rv370_pcie_rreg;
//        rdev->pcie_wreg = &rv370_pcie_wreg;
    }
    if (rdev->family >= CHIP_RV515) {
        rdev->pcie_rreg = &rv515_pcie_rreg;
        rdev->pcie_wreg = &rv515_pcie_wreg;
    }
    /* FIXME: not sure here */
    if (rdev->family <= CHIP_R580) {
        rdev->pll_rreg = &r100_pll_rreg;
        rdev->pll_wreg = &r100_pll_wreg;
    }
    if (rdev->family >= CHIP_RV515) {
        rdev->mc_rreg = &rv515_mc_rreg;
        rdev->mc_wreg = &rv515_mc_wreg;
    }
    if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
//        rdev->mc_rreg = &rs400_mc_rreg;
//        rdev->mc_wreg = &rs400_mc_wreg;
    }
    if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
//        rdev->mc_rreg = &rs690_mc_rreg;
//        rdev->mc_wreg = &rs690_mc_wreg;
    }
    if (rdev->family == CHIP_RS600) {
//        rdev->mc_rreg = &rs600_mc_rreg;
//        rdev->mc_wreg = &rs600_mc_wreg;
    }
    if (rdev->family >= CHIP_R600) {
//        rdev->pciep_rreg = &r600_pciep_rreg;
//        rdev->pciep_wreg = &r600_pciep_wreg;
    }
}



/*
 * ASIC
 */
int radeon_asic_init(struct radeon_device *rdev)
{

    dbgprintf("%s\n",__FUNCTION__);

    radeon_register_accessor_init(rdev);
	switch (rdev->family) {
	case CHIP_R100:
	case CHIP_RV100:
	case CHIP_RS100:
	case CHIP_RV200:
	case CHIP_RS200:
	case CHIP_R200:
	case CHIP_RV250:
	case CHIP_RS300:
	case CHIP_RV280:
//       rdev->asic = &r100_asic;
		break;
	case CHIP_R300:
	case CHIP_R350:
	case CHIP_RV350:
	case CHIP_RV380:
//       rdev->asic = &r300_asic;
		break;
	case CHIP_R420:
	case CHIP_R423:
	case CHIP_RV410:
//       rdev->asic = &r420_asic;
		break;
	case CHIP_RS400:
	case CHIP_RS480:
//       rdev->asic = &rs400_asic;
		break;
	case CHIP_RS600:
//       rdev->asic = &rs600_asic;
		break;
	case CHIP_RS690:
	case CHIP_RS740:
//       rdev->asic = &rs690_asic;
		break;
	case CHIP_RV515:
//       rdev->asic = &rv515_asic;
		break;
	case CHIP_R520:
	case CHIP_RV530:
	case CHIP_RV560:
	case CHIP_RV570:
	case CHIP_R580:
        rdev->asic = &r520_asic;
		break;
	case CHIP_R600:
	case CHIP_RV610:
	case CHIP_RV630:
	case CHIP_RV620:
	case CHIP_RV635:
	case CHIP_RV670:
	case CHIP_RS780:
	case CHIP_RV770:
	case CHIP_RV730:
	case CHIP_RV710:
	default:
		/* FIXME: not supported yet */
		return -EINVAL;
	}
	return 0;
}


/*
 * Wrapper around modesetting bits.
 */
int radeon_clocks_init(struct radeon_device *rdev)
{
	int r;

    dbgprintf("%s\n",__FUNCTION__);

    radeon_get_clock_info(rdev->ddev);
    r = radeon_static_clocks_init(rdev->ddev);
	if (r) {
		return r;
	}
	DRM_INFO("Clocks initialized !\n");
	return 0;
}

void radeon_clocks_fini(struct radeon_device *rdev)
{
}

/* ATOM accessor methods */
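/* The cail_* callbacks below are collected in atom_card_info and handed to
 * atom_parse() in radeon_atombios_init(), so the AtomBIOS interpreter reaches
 * PLL, MC and plain MMIO registers through the rdev accessor hooks installed
 * by radeon_register_accessor_init(). */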
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->pll_rreg(rdev, reg);
    return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->mc_rreg(rdev, reg);
    return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = RREG32(reg*4);
    return r;
}

static struct card_info atom_card_info = {
    .dev = NULL,
    .reg_read = cail_reg_read,
    .reg_write = cail_reg_write,
    .mc_read = cail_mc_read,
    .mc_write = cail_mc_write,
    .pll_read = cail_pll_read,
    .pll_write = cail_pll_write,
};

int radeon_atombios_init(struct radeon_device *rdev)
{
    dbgprintf("%s\n",__FUNCTION__);

    atom_card_info.dev = rdev->ddev;
    rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios);
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
    return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	kfree(rdev->mode_info.atom_context);
}

int radeon_combios_init(struct radeon_device *rdev)
{
//	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

int radeon_modeset_init(struct radeon_device *rdev);
void radeon_modeset_fini(struct radeon_device *rdev);

/*
 * Radeon device.
 */
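/* Bring-up sequence: fill in the radeon_device fields, pick the bus type,
 * install the ASIC callbacks, map the MMIO BAR, fetch (and if necessary post)
 * the video BIOS, read the VRAM configuration, then initialize clocks, the
 * memory controller, the memory manager, GART and the CP ring before starting
 * kernel modesetting. */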
int radeon_device_init(struct radeon_device *rdev,
               struct drm_device *ddev,
               struct pci_dev *pdev,
               uint32_t flags)
{
    int r, ret = -1;

    dbgprintf("%s\n",__FUNCTION__);

    DRM_INFO("radeon: Initializing kernel modesetting.\n");
    rdev->shutdown = false;
    rdev->ddev = ddev;
    rdev->pdev = pdev;
    rdev->flags = flags;
    rdev->family = flags & RADEON_FAMILY_MASK;
    rdev->is_atom_bios = false;
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
    rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
    rdev->gpu_lockup = false;
    /* mutex initialization are all done here so we
     * can recall function without having locking issues */
 //   mutex_init(&rdev->cs_mutex);
 //   mutex_init(&rdev->ib_pool.mutex);
 //   mutex_init(&rdev->cp.mutex);
 //   rwlock_init(&rdev->fence_drv.lock);


    if (radeon_agpmode == -1) {
        rdev->flags &= ~RADEON_IS_AGP;
        if (rdev->family > CHIP_RV515 ||
            rdev->family == CHIP_RV380 ||
            rdev->family == CHIP_RV410 ||
            rdev->family == CHIP_R423) {
            DRM_INFO("Forcing AGP to PCIE mode\n");
            rdev->flags |= RADEON_IS_PCIE;
        } else {
            DRM_INFO("Forcing AGP to PCI mode\n");
            rdev->flags |= RADEON_IS_PCI;
        }
    }

    /* Set asic functions */
    r = radeon_asic_init(rdev);
    if (r) {
        return r;
    }

    r = rdev->asic->init(rdev);

    if (r) {
        return r;
    }

    /* Report DMA addressing limitation */
    r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
    if (r) {
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
    }

    /* Registers mapping */
    /* TODO: block userspace mapping of io register */
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);

    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);

    rdev->rmmio =  (void*)MapIoMem(rdev->rmmio_base, rdev->rmmio_size,
                                   PG_SW+PG_NOCACHE);

    if (rdev->rmmio == NULL) {
        return -ENOMEM;
    }
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

    /* Setup errata flags */
    radeon_errata(rdev);
    /* Initialize scratch registers */
    radeon_scratch_init(rdev);
	/* Initialize surface registers */
    radeon_surface_init(rdev);

    /* TODO: disable VGA need to use VGA request */
    /* BIOS*/
    if (!radeon_get_bios(rdev)) {
        if (ASIC_IS_AVIVO(rdev))
            return -EINVAL;
    }
    if (rdev->is_atom_bios) {
        r = radeon_atombios_init(rdev);
        if (r) {
            return r;
        }
    } else {
        r = radeon_combios_init(rdev);
        if (r) {
            return r;
        }
    }
    /* Reset gpu before posting otherwise ATOM will enter infinite loop */
    if (radeon_gpu_reset(rdev)) {
        /* FIXME: what do we want to do here ? */
    }
    /* check if cards are posted or not */
    if (!radeon_card_posted(rdev) && rdev->bios) {
        DRM_INFO("GPU not posted. posting now...\n");
        if (rdev->is_atom_bios) {
            atom_asic_init(rdev->mode_info.atom_context);
        } else {
    //        radeon_combios_asic_init(rdev->ddev);
        }
    }

    /* Get vram informations */
    radeon_vram_info(rdev);
    /* Device is severly broken if aper size > vram size.
     * for RN50/M6/M7 - Novell bug 204882 ?
     */
    if (rdev->mc.vram_size < rdev->mc.aper_size) {
        rdev->mc.aper_size = rdev->mc.vram_size;
    }
    /* Add an MTRR for the VRAM */
//    rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
//                      MTRR_TYPE_WRCOMB, 1);
    DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n",
         rdev->mc.vram_size >> 20,
         (unsigned)rdev->mc.aper_size >> 20);
    DRM_INFO("RAM width %dbits %cDR\n",
         rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');

    /* Initialize clocks */
    r = radeon_clocks_init(rdev);
    if (r) {
        return r;
    }

    /* Initialize memory controller (also test AGP) */
    r = radeon_mc_init(rdev);
    if (r) {
        return r;
    };


    /* Fence driver */
//    r = radeon_fence_driver_init(rdev);
//    if (r) {
//        return r;
//    }
//    r = radeon_irq_kms_init(rdev);
//    if (r) {
//        return r;
//    }
    /* Memory manager */
    r = radeon_object_init(rdev);
    if (r) {
        return r;
    }
    /* Initialize GART (initialize after TTM so we can allocate
     * memory through TTM but finalize after TTM) */
    r = radeon_gart_enable(rdev);
//    if (!r) {
//        r = radeon_gem_init(rdev);
//    }

    /* 1M ring buffer */
    if (!r) {
        r = radeon_cp_init(rdev, 1024 * 1024);
    }
//    if (!r) {
//        r = radeon_wb_init(rdev);
//        if (r) {
//            DRM_ERROR("radeon: failled initializing WB (%d).\n", r);
//            return r;
//        }
//    }

#if 0
    if (!r) {
        r = radeon_ib_pool_init(rdev);
        if (r) {
            DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
            return r;
        }
    }
    if (!r) {
        r = radeon_ib_test(rdev);
        if (r) {
            DRM_ERROR("radeon: failled testing IB (%d).\n", r);
            return r;
        }
    }
#endif

    ret = r;
    r = radeon_modeset_init(rdev);
    if (r) {
        return r;
    }
//    if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) {
//        rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private;
//    }
    if (!ret) {
        DRM_INFO("radeon: kernel modesetting successfully initialized.\n");
    }
//    if (radeon_benchmarking) {
//        radeon_benchmark(rdev);
//    }

    return -1;
}

static struct pci_device_id pciidlist[] = {
    radeon_PCI_IDS
};

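/* KolibriOS driver entry point: opens the debug log, enumerates the PCI bus,
 * looks for a device matching pciidlist and, when one is found, hands it to
 * drm_get_dev() below. */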
u32_t __stdcall drvEntry(int action)
{
    struct pci_device_id  *ent;

    dev_t   device;
    int     err;
    u32_t   retval = 0;

    if(action != 1)
        return 0;

    if(!dbg_open("/hd0/2/atikms.log"))
    {
        printf("Can't open /hd0/2/atikms.log\nExit\n");
        return 0;
    }

    enum_pci_devices();

    ent = find_pci_device(&device, pciidlist);

    if( unlikely(ent == NULL) )
    {
        dbgprintf("device not found\n");
        return 0;
    };

    dbgprintf("device %x:%x\n", device.pci_dev.vendor,
                                device.pci_dev.device);

    err = drm_get_dev(&device.pci_dev, ent);

    return retval;
};

/*
static struct drm_driver kms_driver = {
    .driver_features =
        DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
        DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_GEM,
    .dev_priv_size = 0,
    .load = radeon_driver_load_kms,
    .firstopen = radeon_driver_firstopen_kms,
    .open = radeon_driver_open_kms,
    .preclose = radeon_driver_preclose_kms,
    .postclose = radeon_driver_postclose_kms,
    .lastclose = radeon_driver_lastclose_kms,
    .unload = radeon_driver_unload_kms,
    .suspend = radeon_suspend_kms,
    .resume = radeon_resume_kms,
    .get_vblank_counter = radeon_get_vblank_counter_kms,
    .enable_vblank = radeon_enable_vblank_kms,
    .disable_vblank = radeon_disable_vblank_kms,
    .master_create = radeon_master_create_kms,
    .master_destroy = radeon_master_destroy_kms,
#if defined(CONFIG_DEBUG_FS)
    .debugfs_init = radeon_debugfs_init,
    .debugfs_cleanup = radeon_debugfs_cleanup,
#endif
    .irq_preinstall = radeon_driver_irq_preinstall_kms,
    .irq_postinstall = radeon_driver_irq_postinstall_kms,
    .irq_uninstall = radeon_driver_irq_uninstall_kms,
    .irq_handler = radeon_driver_irq_handler_kms,
    .reclaim_buffers = drm_core_reclaim_buffers,
    .get_map_ofs = drm_core_get_map_ofs,
    .get_reg_ofs = drm_core_get_reg_ofs,
    .ioctls = radeon_ioctls_kms,
    .gem_init_object = radeon_gem_object_init,
    .gem_free_object = radeon_gem_object_free,
    .dma_ioctl = radeon_dma_ioctl_kms,
    .fops = {
         .owner = THIS_MODULE,
         .open = drm_open,
         .release = drm_release,
         .ioctl = drm_ioctl,
         .mmap = radeon_mmap,
         .poll = drm_poll,
         .fasync = drm_fasync,
#ifdef CONFIG_COMPAT
         .compat_ioctl = NULL,
#endif
    },

    .pci_driver = {
         .name = DRIVER_NAME,
         .id_table = pciidlist,
         .probe = radeon_pci_probe,
         .remove = radeon_pci_remove,
         .suspend = radeon_pci_suspend,
         .resume = radeon_pci_resume,
    },

    .name = DRIVER_NAME,
    .desc = DRIVER_DESC,
    .date = DRIVER_DATE,
    .major = KMS_DRIVER_MAJOR,
    .minor = KMS_DRIVER_MINOR,
    .patchlevel = KMS_DRIVER_PATCHLEVEL,
};
*/


/*
 * Driver load/unload
 */
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
{
    struct radeon_device *rdev;
    int r;

    dbgprintf("%s\n",__FUNCTION__);

    rdev = kzalloc(sizeof(struct radeon_device), GFP_KERNEL);
    if (rdev == NULL) {
        return -ENOMEM;
    };

    dev->dev_private = (void *)rdev;

    /* update BUS flag */
//    if (drm_device_is_agp(dev)) {
        flags |= RADEON_IS_AGP;
//    } else if (drm_device_is_pcie(dev)) {
//        flags |= RADEON_IS_PCIE;
//    } else {
//        flags |= RADEON_IS_PCI;
//    }

    r = radeon_device_init(rdev, dev, dev->pdev, flags);
    if (r) {
        dbgprintf("Failed to initialize Radeon, disabling IOCTL\n");
//        radeon_device_fini(rdev);
        return r;
    }
    return 0;
}

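/* Cut-down replacement for the DRM core's drm_get_dev(): allocates the
 * drm_device, records the PCI ids and calls radeon_driver_load_kms() directly;
 * the minor/sysfs handling of the original is left commented out. */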
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    struct drm_device *dev;
    int ret;

    dbgprintf("%s\n",__FUNCTION__);

    dev = malloc(sizeof(*dev));
    if (!dev)
        return -ENOMEM;

 //   ret = pci_enable_device(pdev);
 //   if (ret)
 //       goto err_g1;

 //   pci_set_master(pdev);

 //   if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
 //       printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
 //       goto err_g2;
 //   }

    dev->pdev = pdev;
    dev->pci_device = pdev->device;
    dev->pci_vendor = pdev->vendor;

 //   if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 //       pci_set_drvdata(pdev, dev);
 //       ret = drm_get_minor(dev, &dev->control, DRM_MINOR_CONTROL);
 //       if (ret)
 //           goto err_g2;
 //   }

 //   if ((ret = drm_get_minor(dev, &dev->primary, DRM_MINOR_LEGACY)))
 //       goto err_g3;

 //   if (dev->driver->load) {
 //       ret = dev->driver->load(dev, ent->driver_data);
 //       if (ret)
 //           goto err_g4;
 //   }

      ret = radeon_driver_load_kms(dev, ent->driver_data );
      if (ret)
        goto err_g4;

 //   list_add_tail(&dev->driver_item, &driver->device_list);

 //   DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
 //        driver->name, driver->major, driver->minor, driver->patchlevel,
 //        driver->date, pci_name(pdev), dev->primary->index);

    return 0;

err_g4:
//    drm_put_minor(&dev->primary);
//err_g3:
//    if (drm_core_check_feature(dev, DRIVER_MODESET))
//        drm_put_minor(&dev->control);
//err_g2:
//    pci_disable_device(pdev);
//err_g1:
    free(dev);

    return ret;
}

resource_size_t drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
    return pci_resource_start(dev->pdev, resource);
}

resource_size_t drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
    return pci_resource_len(dev->pdev, resource);
}

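/* 64-bit division helper: divides *n by base with shift-and-subtract long
 * division, stores the quotient back in *n and returns the remainder. */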
uint32_t __div64_32(uint64_t *n, uint32_t base)
{
        uint64_t rem = *n;
        uint64_t b = base;
        uint64_t res, d = 1;
        uint32_t high = rem >> 32;

        /* Reduce the thing a bit first */
        res = 0;
        if (high >= base) {
                high /= base;
                res = (uint64_t) high << 32;
                rem -= (uint64_t) (high*base) << 32;
        }

        while ((int64_t)b > 0 && b < rem) {
                b = b+b;
                d = d+d;
        }

        do {
                if (rem >= b) {
                        rem -= b;
                        res += d;
                }
                b >>= 1;
                d >>= 1;
        } while (d);

        *n = res;
        return rem;
}