Subversion Repositories Kolibri OS


Diff: Rev 3764 → Rev 5078 (shown below as a unified diff: lines marked "-" exist only in Rev 3764, lines marked "+" only in Rev 5078; unmarked lines are common context)
Line 59... Line 59...
 MODULE_FIRMWARE(FIRMWARE_R420);
 MODULE_FIRMWARE(FIRMWARE_RS690);
 MODULE_FIRMWARE(FIRMWARE_RS600);
 MODULE_FIRMWARE(FIRMWARE_R520);
 
+#include "r100_track.h"
 
 /* This files gather functions specifics to:
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
Line 137... Line 138...
 			if (!r100_is_counter_moving(rdev, crtc))
 					break;
 		}
 	}
 }
+
+/**
+ * r100_page_flip - pageflip callback.
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to cleanup pageflip on
+ * @crtc_base: new address of the crtc (GPU MC address)
+ *
+ * Does the actual pageflip (r1xx-r4xx).
+ * During vblank we take the crtc lock and wait for the update_pending
+ * bit to go high, when it does, we release the lock, and allow the
+ * double buffered update to take place.
+ */
-u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
+void r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
 	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
 	u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
 	int i;
Line 159... Line 173...
 
 	/* Unlock the lock, so double-buffering can take place inside vblank */
 	tmp &= ~RADEON_CRTC_OFFSET__OFFSET_LOCK;
 	WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
 
-	/* Return current update_pending status: */
-	return RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET;
 }
+
+/**
+ * r100_page_flip_pending - check if page flip is still pending
+ *
+ * @rdev: radeon_device pointer
+ * @crtc_id: crtc to check
+ *
+ * Check if the last pagefilp is still pending (r1xx-r4xx).
+ * Returns the current update pending status.
+ */
+bool r100_page_flip_pending(struct radeon_device *rdev, int crtc_id)
+{
+	struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
+
+	/* Return current update_pending status: */
+	return !!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) &
+		RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET);
+}
+
+/**
+ * r100_pm_get_dynpm_state - look up dynpm power state callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Look up the optimal power state based on the
+ * current state of the GPU (r1xx-r5xx).
+ * Used for dynpm only.
+ */
+void r100_pm_get_dynpm_state(struct radeon_device *rdev)
+{
+	int i;
+	rdev->pm.dynpm_can_upclock = true;
+	rdev->pm.dynpm_can_downclock = true;
+
+	switch (rdev->pm.dynpm_planned_action) {
+	case DYNPM_ACTION_MINIMUM:
+		rdev->pm.requested_power_state_index = 0;
+		rdev->pm.dynpm_can_downclock = false;
+		break;
+	case DYNPM_ACTION_DOWNCLOCK:
+		if (rdev->pm.current_power_state_index == 0) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_downclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = 0; i < rdev->pm.num_power_states; i++) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i >= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index - 1;
+		}
+		/* don't use the power state if crtcs are active and no display flag is set */
+		if ((rdev->pm.active_crtc_count > 0) &&
+		    (rdev->pm.power_state[rdev->pm.requested_power_state_index].clock_info[0].flags &
+		     RADEON_PM_MODE_NO_DISPLAY)) {
+			rdev->pm.requested_power_state_index++;
+		}
+		break;
+	case DYNPM_ACTION_UPCLOCK:
+		if (rdev->pm.current_power_state_index == (rdev->pm.num_power_states - 1)) {
+			rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+			rdev->pm.dynpm_can_upclock = false;
+		} else {
+			if (rdev->pm.active_crtc_count > 1) {
+				for (i = (rdev->pm.num_power_states - 1); i >= 0; i--) {
+					if (rdev->pm.power_state[i].flags & RADEON_PM_STATE_SINGLE_DISPLAY_ONLY)
+						continue;
+					else if (i <= rdev->pm.current_power_state_index) {
+						rdev->pm.requested_power_state_index = rdev->pm.current_power_state_index;
+						break;
+					} else {
+						rdev->pm.requested_power_state_index = i;
+						break;
+					}
+				}
+			} else
+				rdev->pm.requested_power_state_index =
+					rdev->pm.current_power_state_index + 1;
+		}
+		break;
+	case DYNPM_ACTION_DEFAULT:
+		rdev->pm.requested_power_state_index = rdev->pm.default_power_state_index;
+		rdev->pm.dynpm_can_upclock = false;
+		break;
+	case DYNPM_ACTION_NONE:
+	default:
+		DRM_ERROR("Requested mode for not defined action\n");
+		return;
+	}
+	/* only one clock mode per power state */
+	rdev->pm.requested_clock_mode_index = 0;
+
+	DRM_DEBUG_DRIVER("Requested: e: %d m: %d p: %d\n",
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].sclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  clock_info[rdev->pm.requested_clock_mode_index].mclk,
+		  rdev->pm.power_state[rdev->pm.requested_power_state_index].
+		  pcie_lanes);
+}
+
+/**
+ * r100_pm_init_profile - Initialize power profiles callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Initialize the power states used in profile mode
+ * (r1xx-r3xx).
+ * Used for profile mode only.
+ */
+void r100_pm_init_profile(struct radeon_device *rdev)
+{
+	/* default */
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
+	/* low sh */
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
+	/* mid sh */
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
+	/* high sh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 0;
+	/* low mh */
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
+	/* mid mh */
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
+	/* high mh */
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
+	rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 0;
+}
+
+/**
+ * r100_pm_misc - set additional pm hw parameters callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Set non-clock parameters associated with a power state
+ * (voltage, pcie lanes, etc.) (r1xx-r4xx).
+ */
+void r100_pm_misc(struct radeon_device *rdev)
+{
+	int requested_index = rdev->pm.requested_power_state_index;
+	struct radeon_power_state *ps = &rdev->pm.power_state[requested_index];
+	struct radeon_voltage *voltage = &ps->clock_info[0].voltage;
+	u32 tmp, sclk_cntl, sclk_cntl2, sclk_more_cntl;
+
+	if ((voltage->type == VOLTAGE_GPIO) && (voltage->gpio.valid)) {
+		if (ps->misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp |= voltage->gpio.mask;
+			else
+				tmp &= ~(voltage->gpio.mask);
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		} else {
+			tmp = RREG32(voltage->gpio.reg);
+			if (voltage->active_high)
+				tmp &= ~voltage->gpio.mask;
+			else
+				tmp |= voltage->gpio.mask;
+			WREG32(voltage->gpio.reg, tmp);
+			if (voltage->delay)
+				udelay(voltage->delay);
+		}
+	}
+
+	sclk_cntl = RREG32_PLL(SCLK_CNTL);
+	sclk_cntl2 = RREG32_PLL(SCLK_CNTL2);
+	sclk_cntl2 &= ~REDUCED_SPEED_SCLK_SEL(3);
+	sclk_more_cntl = RREG32_PLL(SCLK_MORE_CNTL);
+	sclk_more_cntl &= ~VOLTAGE_DELAY_SEL(3);
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN) {
+		sclk_more_cntl |= REDUCED_SPEED_SCLK_EN;
+		if (ps->misc & ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_MODE;
+		else
+			sclk_cntl2 &= ~REDUCED_SPEED_SCLK_MODE;
+		if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(0);
+		else if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4)
+			sclk_cntl2 |= REDUCED_SPEED_SCLK_SEL(2);
+	} else
+		sclk_more_cntl &= ~REDUCED_SPEED_SCLK_EN;
+
+	if (ps->misc & ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN) {
+		sclk_more_cntl |= IO_CG_VOLTAGE_DROP;
+		if (voltage->delay) {
+			sclk_more_cntl |= VOLTAGE_DROP_SYNC;
+			switch (voltage->delay) {
+			case 33:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(0);
+				break;
+			case 66:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(1);
+				break;
+			case 99:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(2);
+				break;
+			case 132:
+				sclk_more_cntl |= VOLTAGE_DELAY_SEL(3);
+				break;
+			}
+		} else
+			sclk_more_cntl &= ~VOLTAGE_DROP_SYNC;
+	} else
+		sclk_more_cntl &= ~IO_CG_VOLTAGE_DROP;
+
+	if (ps->misc & ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN)
+		sclk_cntl &= ~FORCE_HDP;
+	else
+		sclk_cntl |= FORCE_HDP;
+
+	WREG32_PLL(SCLK_CNTL, sclk_cntl);
+	WREG32_PLL(SCLK_CNTL2, sclk_cntl2);
+	WREG32_PLL(SCLK_MORE_CNTL, sclk_more_cntl);
+
+	/* set pcie lanes */
+	if ((rdev->flags & RADEON_IS_PCIE) &&
+	    !(rdev->flags & RADEON_IS_IGP) &&
+	    rdev->asic->pm.set_pcie_lanes &&
+	    (ps->pcie_lanes !=
+	     rdev->pm.power_state[rdev->pm.current_power_state_index].pcie_lanes)) {
+		radeon_set_pcie_lanes(rdev,
+				      ps->pcie_lanes);
+		DRM_DEBUG_DRIVER("Setting: p: %d\n", ps->pcie_lanes);
+	}
+}
+
+/**
+ * r100_pm_prepare - pre-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Prepare for a power state change (r1xx-r4xx).
+ */
+void r100_pm_prepare(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* disable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp |= RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp |= RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+/**
+ * r100_pm_finish - post-power state change callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Clean up after a power state change (r1xx-r4xx).
+ */
+void r100_pm_finish(struct radeon_device *rdev)
+{
+	struct drm_device *ddev = rdev->ddev;
+	struct drm_crtc *crtc;
+	struct radeon_crtc *radeon_crtc;
+	u32 tmp;
+
+	/* enable any active CRTCs */
+	list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) {
+		radeon_crtc = to_radeon_crtc(crtc);
+		if (radeon_crtc->enabled) {
+			if (radeon_crtc->crtc_id) {
+				tmp = RREG32(RADEON_CRTC2_GEN_CNTL);
+				tmp &= ~RADEON_CRTC2_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC2_GEN_CNTL, tmp);
+			} else {
+				tmp = RREG32(RADEON_CRTC_GEN_CNTL);
+				tmp &= ~RADEON_CRTC_DISP_REQ_EN_B;
+				WREG32(RADEON_CRTC_GEN_CNTL, tmp);
+			}
+		}
+	}
+}
+
+/**
+ * r100_gui_idle - gui idle callback.
+ *
+ * @rdev: radeon_device pointer
+ *
+ * Check of the GUI (2D/3D engines) are idle (r1xx-r5xx).
+ * Returns true if idle, false if not.
+ */
 bool r100_gui_idle(struct radeon_device *rdev)
 {
 	if (RREG32(RADEON_RBBM_STATUS) & RADEON_RBBM_ACTIVE)
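Note: Rev 5078 splits the pageflip interface in two. r100_page_flip() now just programs the new base address and returns void, while the new r100_page_flip_pending() reports whether the double-buffered update has latched, leaving the wait policy to the caller. A minimal sketch of the resulting pattern, assuming the caller already holds whatever crtc/vblank locking it needs (the loop below is illustrative, not code from this file):

	/* illustrative caller, not from r100.c */
	r100_page_flip(rdev, crtc_id, crtc_base);
	while (r100_page_flip_pending(rdev, crtc_id))
		udelay(1);	/* GUI_TRIG_OFFSET still set: flip not latched yet */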
Line 311... Line 650...
 
 int r100_pci_gart_enable(struct radeon_device *rdev)
 {
 	uint32_t tmp;
 
-	radeon_gart_restore(rdev);
 	/* discard memory request outside of configured range */
 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
 	WREG32(RADEON_AIC_CNTL, tmp);
Line 341... Line 679...
 	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
 	WREG32(RADEON_AIC_LO_ADDR, 0);
 	WREG32(RADEON_AIC_HI_ADDR, 0);
 }
 
-int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
+			    uint64_t addr, uint32_t flags)
 {
 	u32 *gtt = rdev->gart.ptr;
-
-	if (i < 0 || i > rdev->gart.num_gpu_pages) {
-		return -EINVAL;
-	}
 	gtt[i] = cpu_to_le32(lower_32_bits(addr));
-	return 0;
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
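Note: the GART hook loses its bounds check and error return, and the page index becomes unsigned; in the newer radeon tree the common GART code is expected to validate the index before calling the per-ASIC hook. The new flags argument carries per-page attributes on ASICs that support them; the r1xx PCI GART table has no flag bits, so it is simply ignored here. A hedged sketch of a call as the common layer might now issue it (the variable names are assumptions, not taken from this page):

	/* hypothetical call site in the common gart code */
	r100_pci_gart_set_page(rdev, i, page_dma_addr, 0 /* flags: unused on r1xx */);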
Line 501... Line 835...
 	radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
 	radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
 	radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
-	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
-				RADEON_HDP_READ_BUFFER_INVALIDATE);
-	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
-	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+	r100_ring_hdp_flush(rdev, ring);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
 	radeon_ring_write(ring, fence->seq);
 	radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
 	radeon_ring_write(ring, RADEON_SW_INT_FIRE);
 }
 
-void r100_semaphore_ring_emit(struct radeon_device *rdev,
+bool r100_semaphore_ring_emit(struct radeon_device *rdev,
 			      struct radeon_ring *ring,
 			      struct radeon_semaphore *semaphore,
 			      bool emit_wait)
 {
 	/* Unused on older asics, since we don't have semaphores or multiple rings */
 	BUG();
+	return false;
 }
 
 int r100_copy_blit(struct radeon_device *rdev,
Line 592... Line 923...
 			  RADEON_WAIT_HOST_IDLECLEAN |
 			  RADEON_WAIT_DMA_GUI_IDLE);
 	if (fence) {
 		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
 	}
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	return r;
 }
 
 static int r100_cp_wait_for_idle(struct radeon_device *rdev)
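Note: radeon_ring_unlock_commit() takes a third argument in Rev 5078, and every call site in this file passes false. Judging from the call sites, the parameter appears to request an HDP flush along with the commit (needed when CPU-written pages feed the GPU), which none of these driver-internal submissions require:

	radeon_ring_unlock_commit(rdev, ring);		/* Rev 3764 */
	radeon_ring_unlock_commit(rdev, ring, false);	/* Rev 5078 */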
Line 625... Line 956...
 	radeon_ring_write(ring,
 			  RADEON_ISYNC_ANY2D_IDLE3D |
 			  RADEON_ISYNC_ANY3D_IDLE2D |
 			  RADEON_ISYNC_WAIT_IDLEGUI |
 			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 }
 
 
 /* Load the microcode for the CP */
 static int r100_cp_init_microcode(struct radeon_device *rdev)
 {
-	struct platform_device *pdev;
 	const char *fw_name = NULL;
 	int err;
 
 	DRM_DEBUG_KMS("\n");
 
-    pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
-    err = IS_ERR(pdev);
-    if (err) {
-        printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
-        return -EINVAL;
-    }
 	if ((rdev->family == CHIP_R100) || (rdev->family == CHIP_RV100) ||
Line 685... Line 1009...
 		   (rdev->family == CHIP_RV570)) {
 		DRM_INFO("Loading R500 Microcode\n");
 		fw_name = FIRMWARE_R520;
 		}
 
-   err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
-   platform_device_unregister(pdev);
+	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
    if (err) {
        printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
               fw_name);
 	} else if (rdev->me_fw->size % 8) {
Line 701... Line 1024...
 		rdev->me_fw = NULL;
 	}
 	return err;
 }
+
+u32 r100_gfx_get_rptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 rptr;
+
+	if (rdev->wb.enabled)
+		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+	else
+		rptr = RREG32(RADEON_CP_RB_RPTR);
+
+	return rptr;
+}
+
+u32 r100_gfx_get_wptr(struct radeon_device *rdev,
+		      struct radeon_ring *ring)
+{
+	u32 wptr;
+
+	wptr = RREG32(RADEON_CP_RB_WPTR);
+
+	return wptr;
+}
+
+void r100_gfx_set_wptr(struct radeon_device *rdev,
+		       struct radeon_ring *ring)
+{
+	WREG32(RADEON_CP_RB_WPTR, ring->wptr);
+	(void)RREG32(RADEON_CP_RB_WPTR);
+}
+
+/**
+ * r100_ring_hdp_flush - flush Host Data Path via the ring buffer
+ * rdev: radeon device structure
+ * ring: ring buffer struct for emitting packets
+ */
+void r100_ring_hdp_flush(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
+}
 
 static void r100_cp_load_microcode(struct radeon_device *rdev)
 {
 	const __be32 *fw_data;
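Note: the new r100_gfx_get_rptr() prefers the write-back page when rdev->wb.enabled is set: the CP mirrors its read pointer into system memory, so the host polls cheap cacheable RAM instead of issuing an MMIO read of RADEON_CP_RB_RPTR on every check. r100_ring_hdp_flush() factors out the two-step HOST_PATH_CNTL sequence (write hdp_cntl with RADEON_HDP_READ_BUFFER_INVALIDATE set, then restore hdp_cntl) that r100_fence_ring_emit() open-coded in Rev 3764; see the Line 501 hunk above, which now calls this helper instead.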
Line 749... Line 1116...
 			return r;
 		}
 	}
 
 	/* Align ring size */
-	rb_bufsz = drm_order(ring_size / 8);
+	rb_bufsz = order_base_2(ring_size / 8);
 	ring_size = (1 << (rb_bufsz + 1)) * 4;
 	r100_cp_load_microcode(rdev);
 	r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
-			     RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
-			     0, 0x7fffff, RADEON_CP_PACKET2);
+			     RADEON_CP_PACKET2);
 	if (r) {
 		return r;
 	}
 	/* Each time the cp read 1024 bytes (16 dword/quadword) update
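Note: drm_order() was removed from the DRM core around this time; order_base_2() from <linux/log2.h> is the drop-in replacement. Both round up to the base-2 order of the argument, e.g.:

	#include <linux/log2.h>

	order_base_2(8);	/* == 3 */
	order_base_2(9);	/* == 4, rounds up to the next power of two */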
Line 815... Line 1181...
 		WREG32(R_000770_SCRATCH_UMSK, 0);
 	}
 
 	WREG32(RADEON_CP_RB_CNTL, tmp);
 	udelay(10);
-	ring->rptr = RREG32(RADEON_CP_RB_RPTR);
 	/* Set cp mode to bus mastering & enable cp*/
 	WREG32(RADEON_CP_CSQ_MODE,
 	       REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
 	       REG_SET(RADEON_INDIRECT1_START, indirect1_start));
Line 869... Line 1234...
 		printk(KERN_WARNING "Failed to wait GUI idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
 }
 
-#if 0
 /*
  * CS functions
  */
 int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
Line 894... Line 1258...
 		return r;
 	}
 
 	value = radeon_get_ib_value(p, idx);
 	tmp = value & 0x003fffff;
-	tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
+	tmp += (((u32)reloc->gpu_offset) >> 10);
 
 	if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+		if (reloc->tiling_flags & RADEON_TILING_MACRO)
 			tile_flags |= RADEON_DST_TILE_MACRO;
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+		if (reloc->tiling_flags & RADEON_TILING_MICRO) {
 			if (reg == RADEON_SRC_PITCH_OFFSET) {
 				DRM_ERROR("Cannot src blit from microtiled surface\n");
 				radeon_cs_dump_packet(p, pkt);
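Note: the repeated s/reloc->lobj.gpu_offset/reloc->gpu_offset/ and s/reloc->lobj.tiling_flags/reloc->tiling_flags/ edits here and in the CS hunks below track an upstream cleanup that folded the fields the parser needs from the intermediate buffer-list object ("lobj") directly into the reloc entry, dropping one indirection:

	offset = reloc->lobj.gpu_offset;	/* Rev 3764: via the list object */
	offset = reloc->gpu_offset;		/* Rev 5078: on the reloc itself */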
Line 945... Line 1309...
 				  pkt->opcode);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		idx_value = radeon_get_ib_value(p, idx);
-		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
 
 		track->arrays[i + 0].esize = idx_value >> 8;
 		track->arrays[i + 0].robj = reloc->robj;
 		track->arrays[i + 0].esize &= 0x7F;
Line 957... Line 1321...
 			DRM_ERROR("No reloc for packet3 %d\n",
 				  pkt->opcode);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->gpu_offset);
 		track->arrays[i + 1].robj = reloc->robj;
 		track->arrays[i + 1].esize = idx_value >> 24;
 		track->arrays[i + 1].esize &= 0x7F;
 	}
 	if (c & 1) {
Line 971... Line 1335...
 					  pkt->opcode);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		idx_value = radeon_get_ib_value(p, idx);
-		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
 		track->arrays[i + 0].robj = reloc->robj;
 		track->arrays[i + 0].esize = idx_value >> 8;
 		track->arrays[i + 0].esize &= 0x7F;
 	}
 	return r;
Line 1024... Line 1388...
 		}
 	}
 	return 0;
 }
-
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
-			 struct radeon_cs_packet *pkt)
-{
-	volatile uint32_t *ib;
-	unsigned i;
-	unsigned idx;
-
-	ib = p->ib.ptr;
-	idx = pkt->idx;
-	for (i = 0; i <= (pkt->count + 1); i++, idx++) {
-		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
-	}
-}
-
-/**
- * r100_cs_packet_parse() - parse cp packet and point ib index to next packet
- * @parser:	parser structure holding parsing context.
- * @pkt:	where to store packet informations
- *
- * Assume that chunk_ib_index is properly set. Will return -EINVAL
- * if packet is bigger than remaining ib size. or if packets is unknown.
- **/
-int r100_cs_packet_parse(struct radeon_cs_parser *p,
-			 struct radeon_cs_packet *pkt,
-			 unsigned idx)
-{
-	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
-	uint32_t header;
-
-	if (idx >= ib_chunk->length_dw) {
-		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
-			  idx, ib_chunk->length_dw);
-		return -EINVAL;
-	}
-	header = radeon_get_ib_value(p, idx);
-	pkt->idx = idx;
-	pkt->type = CP_PACKET_GET_TYPE(header);
-	pkt->count = CP_PACKET_GET_COUNT(header);
-	switch (pkt->type) {
-	case PACKET_TYPE0:
-		pkt->reg = CP_PACKET0_GET_REG(header);
-		pkt->one_reg_wr = CP_PACKET0_GET_ONE_REG_WR(header);
-		break;
-	case PACKET_TYPE3:
-		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
-		break;
-	case PACKET_TYPE2:
-		pkt->count = -1;
-		break;
-	default:
-		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
-		return -EINVAL;
-	}
-	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
-		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
-			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
-		return -EINVAL;
-	}
-	return 0;
-}
 
 /**
  * r100_cs_packet_next_vline() - parse userspace VLINE packet
  * @parser:		parser structure holding parsing context.
  *
Line 1101... Line 1404...
  * It also detects a switched off crtc and nulls out the
  * wait in that case.
  */
 int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
 {
-	struct drm_mode_object *obj;
 	struct drm_crtc *crtc;
 	struct radeon_crtc *radeon_crtc;
 	struct radeon_cs_packet p3reloc, waitreloc;
 	int crtc_id;
 	int r;
Line 1141... Line 1443...
 	p->idx += p3reloc.count + 2;
 
 	header = radeon_get_ib_value(p, h_idx);
 	crtc_id = radeon_get_ib_value(p, h_idx + 5);
 	reg = R100_CP_PACKET0_GET_REG(header);
-	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
-	if (!obj) {
+	crtc = drm_crtc_find(p->rdev->ddev, crtc_id);
+	if (!crtc) {
 		DRM_ERROR("cannot find crtc %d\n", crtc_id);
-		return -EINVAL;
+		return -ENOENT;
 	}
-	crtc = obj_to_crtc(obj);
 	radeon_crtc = to_radeon_crtc(crtc);
 	crtc_id = radeon_crtc->crtc_id;
 
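Note: the open-coded drm_mode_object_find() plus obj_to_crtc() pair becomes drm_crtc_find(), and a missing CRTC now reports -ENOENT ("no such object") instead of the generic -EINVAL. The helper wraps exactly that lookup; roughly:

	static inline struct drm_crtc *drm_crtc_find(struct drm_device *dev, uint32_t id)
	{
		struct drm_mode_object *mo;
		mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_CRTC);
		return mo ? obj_to_crtc(mo) : NULL;
	}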
Line 1275... Line 1576...
 				return r;
 			}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
 		track->zb_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 			break;
 		case RADEON_RB3D_COLOROFFSET:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
Line 1288... Line 1589...
 			return r;
 		}
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
 		track->cb_dirty = true;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
 		case RADEON_PP_TXOFFSET_0:
 		case RADEON_PP_TXOFFSET_1:
 		case RADEON_PP_TXOFFSET_2:
 		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
Line 1302... Line 1603...
 				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
 				tile_flags |= RADEON_TXO_MACRO_TILE;
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
 				tile_flags |= RADEON_TXO_MICRO_TILE_X2;
 
 			tmp = idx_value & ~(0x7 << 2);
 			tmp |= tile_flags;
-			ib[idx] = tmp + ((u32)reloc->lobj.gpu_offset);
+			ib[idx] = tmp + ((u32)reloc->gpu_offset);
 		} else
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+			ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T0_0:
Line 1329... Line 1630...
 				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
 		track->textures[0].cube_info[i].offset = idx_value;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[0].cube_info[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T1_0:
 	case RADEON_PP_CUBIC_OFFSET_T1_1:
Line 1347... Line 1648...
 				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 			}
 		track->textures[1].cube_info[i].offset = idx_value;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[1].cube_info[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
 	case RADEON_PP_CUBIC_OFFSET_T2_0:
 	case RADEON_PP_CUBIC_OFFSET_T2_1:
Line 1365... Line 1666...
 					  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
 				return r;
 			}
 		track->textures[2].cube_info[i].offset = idx_value;
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[2].cube_info[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
Line 1383... Line 1684...
 					  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
 				return r;
 			}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			if (reloc->tiling_flags & RADEON_TILING_MACRO)
 				tile_flags |= RADEON_COLOR_TILE_ENABLE;
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			if (reloc->tiling_flags & RADEON_TILING_MICRO)
 				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
 
 		tmp = idx_value & ~(0x7 << 16);
 			tmp |= tile_flags;
Line 1453... Line 1754...
 				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 					  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
 				return r;
 			}
-		ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 			break;
 	case RADEON_PP_CNTL:
 		{
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
Line 1613... Line 1914...
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->gpu_offset);
 		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
 		if (r) {
 			return r;
 		}
 		break;
Line 1627... Line 1928...
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
 		}
-		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
+		ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);
 		track->num_arrays = 1;
 		track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
 
 		track->arrays[0].robj = reloc->robj;
Line 2128... Line 2429...
 				track->textures[i].cube_info[face].height = 16536;
 				track->textures[i].cube_info[face].offset = 0;
 			}
 	}
 }
-#endif
 
 /*
  * Global GPU functions
  */
Line 2204... Line 2504...
 {
 	u32 rbbm_status;
 
 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
-		radeon_ring_lockup_update(ring);
+		radeon_ring_lockup_update(rdev, ring);
 		return false;
 		}
-	/* force CP activities */
-	radeon_ring_force_activity(rdev, ring);
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
Line 2564... Line 2862...
 	}
 }
 
 uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg)
 {
+	unsigned long flags;
 	uint32_t data;
 
+	spin_lock_irqsave(&rdev->pll_idx_lock, flags);
 	WREG8(RADEON_CLOCK_CNTL_INDEX, reg & 0x3f);
 	r100_pll_errata_after_index(rdev);
 	data = RREG32(RADEON_CLOCK_CNTL_DATA);
 	r100_pll_errata_after_data(rdev);
+	spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
 	return data;
 }
 
 void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->pll_idx_lock, flags);
 	WREG8(RADEON_CLOCK_CNTL_INDEX, ((reg & 0x3f) | RADEON_PLL_WR_EN));
 	r100_pll_errata_after_index(rdev);
 	WREG32(RADEON_CLOCK_CNTL_DATA, v);
 	r100_pll_errata_after_data(rdev);
+	spin_unlock_irqrestore(&rdev->pll_idx_lock, flags);
 }
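Note: CLOCK_CNTL_INDEX/CLOCK_CNTL_DATA form an indexed register pair, so the index write and the data access must be atomic with respect to other CPUs; without the new pll_idx_lock, two concurrent accessors could interleave and one would read or write through the other's index. The same pattern guards the indexed MMIO path with mmio_idx_lock in r100_mm_rreg()/r100_mm_wreg(), which Rev 5078 drops from this file (see the Line 3698 hunk below).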
Line 2637... Line 2942...
 	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
+	if (ring->ready) {
 	for (j = 0; j <= count; j++) {
 		i = (rdp + j) & ring->ptr_mask;
 		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
 	}
+	}
 	return 0;
 }
 
Line 2779... Line 3086...
 		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
 				 == (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
 			flags |= RADEON_SURF_TILE_COLOR_BOTH;
 		if (tiling_flags & RADEON_TILING_MACRO)
 			flags |= RADEON_SURF_TILE_COLOR_MACRO;
+		/* setting pitch to 0 disables tiling */
+		if ((tiling_flags & (RADEON_TILING_MACRO|RADEON_TILING_MICRO))
+				== 0)
+			pitch = 0;
 	} else if (rdev->family <= CHIP_RV280) {
 		if (tiling_flags & (RADEON_TILING_MACRO))
 			flags |= R200_SURF_TILE_COLOR_MACRO;
 		if (tiling_flags & RADEON_TILING_MICRO)
 			flags |= R200_SURF_TILE_COLOR_MICRO;
Line 2796... Line 3107...
 	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
 		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
 	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
 		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;
-
-	/* when we aren't tiling the pitch seems to needs to be furtherdivided down. - tested on power5 + rn50 server */
-	if (tiling_flags & (RADEON_TILING_SWAP_16BIT | RADEON_TILING_SWAP_32BIT)) {
-		if (!(tiling_flags & (RADEON_TILING_MACRO | RADEON_TILING_MICRO)))
-			if (ASIC_IS_RN50(rdev))
-				pitch /= 16;
-	}
 
 	/* r100/r200 divide by 16 */
 	if (rdev->family < CHIP_R300)
 		flags |= pitch / 16;
 	else
Line 2898... Line 3202...
 
 	radeon_update_display_priority(rdev);
 
 	if (rdev->mode_info.crtcs[0]->base.enabled) {
 		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
-		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
+		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
 	}
 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
 	if (rdev->mode_info.crtcs[1]->base.enabled) {
 		mode2 = &rdev->mode_info.crtcs[1]->base.mode;
-		pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
+			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
 	}
 	}
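Note: base.fb becoming base.primary->fb follows the DRM universal-planes rework: the framebuffer pointer moved from struct drm_crtc onto the CRTC's primary plane object, so the bandwidth code reads the same bits_per_pixel value through one more level of indirection.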
Line 3328... Line 3632...
 		radeon_scratch_free(rdev, scratch);
 		return r;
 	}
 	radeon_ring_write(ring, PACKET0(scratch, 0));
 	radeon_ring_write(ring, 0xDEADBEEF);
-	radeon_ring_unlock_commit(rdev, ring);
+	radeon_ring_unlock_commit(rdev, ring, false);
 	for (i = 0; i < rdev->usec_timeout; i++) {
 		tmp = RREG32(scratch);
 		if (tmp == 0xDEADBEEF) {
 			break;
 		}
Line 3390... Line 3694...
 	ib.ptr[4] = PACKET2(0);
 	ib.ptr[5] = PACKET2(0);
 	ib.ptr[6] = PACKET2(0);
 	ib.ptr[7] = PACKET2(0);
 	ib.length_dw = 8;
-	r = radeon_ib_schedule(rdev, &ib, NULL);
+	r = radeon_ib_schedule(rdev, &ib, NULL, false);
 	if (r) {
 		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
 		goto free_ib;
 	}
 	r = radeon_fence_wait(ib.fence, false);
Line 3686... Line 3990...
 		if (r)
 			return r;
 	}
 	r100_set_safe_registers(rdev);
+
+	/* Initialize power management */
+	radeon_pm_init(rdev);
 
 	rdev->accel_working = true;
 	r = r100_startup(rdev);
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
Line 3698... Line 4005...
 		rdev->accel_working = false;
 	}
 	return 0;
 }
-
-uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg,
-		      bool always_indirect)
-{
-	if (reg < rdev->rmmio_size && !always_indirect)
-		return readl(((void __iomem *)rdev->rmmio) + reg);
-	else {
-		unsigned long flags;
-		uint32_t ret;
-
-		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
-		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-		ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
-		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-
-		return ret;
-	}
-}
-
-void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v,
-		  bool always_indirect)
-{
-	if (reg < rdev->rmmio_size && !always_indirect)
-		writel(v, ((void __iomem *)rdev->rmmio) + reg);
-	else {
-		unsigned long flags;
-
-		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
-		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
-		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
-		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
-	}
-}
 
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
 {
 	if (reg < rdev->rio_mem_size)
 		return ioread32(rdev->rio_mem + reg);