Subversion Repositories Kolibri OS

Diff of Rev 2005 (old; lines prefixed with "-") against Rev 2997 (new; lines prefixed with "+").
Line 25... Line 25...
  *          Alex Deucher
  *          Jerome Glisse
  */
 #include <linux/seq_file.h>
 #include <linux/slab.h>
-#include "drmP.h"
+#include <drm/drmP.h>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "radeon_asic.h"
 #include "atom.h"
 #include "r100d.h"
Line 158... Line 158...
 	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
 }
Line 160... Line 160...
 
 static void r420_cp_errata_init(struct radeon_device *rdev)
 {
+	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
 	/* RV410 and R420 can lock up if CP DMA to host memory happens
 	 * while the 2D engine is busy.
 	 *
 	 * The proper workaround is to queue a RESYNC at the beginning
 	 * of the CP init, apparently.
 	 */
 	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
-	radeon_ring_lock(rdev, 8);
-	radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
-	radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
-	radeon_ring_write(rdev, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev);
+	radeon_ring_lock(rdev, ring, 8);
+	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+	radeon_ring_write(ring, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev, ring);
175
}
177
}
176
 
178
 
-
 
179
static void r420_cp_errata_fini(struct radeon_device *rdev)
-
 
180
{
177
static void r420_cp_errata_fini(struct radeon_device *rdev)
181
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
178
{
182
 
179
	/* Catch the RESYNC we dispatched all the way back,
183
	/* Catch the RESYNC we dispatched all the way back,
180
	 * at the very beginning of the CP init.
184
	 * at the very beginning of the CP init.
181
	 */
185
	 */
182
	radeon_ring_lock(rdev, 8);
186
	radeon_ring_lock(rdev, ring, 8);
183
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
187
	radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
184
	radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
188
	radeon_ring_write(ring, R300_RB3D_DC_FINISH);
185
	radeon_ring_unlock_commit(rdev);
189
	radeon_ring_unlock_commit(rdev, ring);
Line 186... Line 190...
186
	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
190
	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
187
}
191
}
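What both errata helpers above are adapting to is the newer ring interface: in Rev 2997, command submission targets an explicit struct radeon_ring taken from rdev->ring[] instead of going implicitly through the device's single CP ring. Below is a minimal sketch of the new-style submission path that uses only the calls visible in this diff; the helper name, the parameter "token", and the check of the radeon_ring_lock return value are illustrative assumptions, not code from r420.c.

/* Sketch: emit the R300 RESYNC packet on the GFX ring, Rev 2997 style. */
static void example_emit_resync(struct radeon_device *rdev, u32 token)
{
	/* The target ring is now named explicitly rather than implied by rdev. */
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];

	/* Reserve up to 8 dwords; assumed to return 0 on success. */
	if (radeon_ring_lock(rdev, ring, 8))
		return;
	radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
	radeon_ring_write(ring, rdev->config.r300.resync_scratch);
	radeon_ring_write(ring, token);          /* e.g. 0xDEADBEEF, as above */
	radeon_ring_unlock_commit(rdev, ring);   /* submit and kick the CP    */
}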
Line 223... Line 227...
 	if (r) {
 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
 		return r;
 	}
 	r420_cp_errata_init(rdev);
-	r = r100_ib_init(rdev);
+
+	r = radeon_ib_pool_init(rdev);
 	if (r) {
-		dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 		return r;
 	}
+
 	return 0;
 }
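Aside from the ring rework, the other change in this startup hunk is that the per-ASIC r100_ib_init() call is replaced by the common radeon_ib_pool_init(), with a reworded error message. For reference, the resulting Rev 2997 code for this part of r420_startup(), assembled purely from the "+" side of the hunk above:

	r420_cp_errata_init(rdev);

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}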
Line 235... Line -...
 
-int r420_resume(struct radeon_device *rdev)
-{
-	/* Make sur GART are not working */
-	if (rdev->flags & RADEON_IS_PCIE)
-		rv370_pcie_gart_disable(rdev);
-	if (rdev->flags & RADEON_IS_PCI)
-		r100_pci_gart_disable(rdev);
-	/* Resume clock before doing reset */
-	r420_clock_resume(rdev);
-	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
-	if (radeon_asic_reset(rdev)) {
-		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
-			RREG32(R_000E40_RBBM_STATUS),
-			RREG32(R_0007C0_CP_STAT));
-	}
-	/* check if cards are posted or not */
-	if (rdev->is_atom_bios) {
-		atom_asic_init(rdev->mode_info.atom_context);
-	} else {
-		radeon_combios_asic_init(rdev->ddev);
-	}
-	/* Resume clock after posting */
-	r420_clock_resume(rdev);
-	/* Initialize surface registers */
-	radeon_surface_init(rdev);
-	return r420_startup(rdev);
 
Line 262... Line 243...
-}
 
Line 339... Line 320...
 		r = r100_pci_gart_init(rdev);
 		if (r)
 			return r;
 	}
 	r420_set_reg_safe(rdev);
+
 	rdev->accel_working = true;
 	r = r420_startup(rdev);
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");