Subversion Repositories: KolibriOS

Diff: Rev 1963 -> Rev 1986
@@ -30 +30 @@
 #include "radeon_drm.h"
 #include "evergreend.h"
 #include "atom.h"
 #include "avivod.h"
 #include "evergreen_reg.h"
+#include "evergreen_blit_shaders.h"
 
 #define EVERGREEN_PFP_UCODE_SIZE 1120
 #define EVERGREEN_PM4_UCODE_SIZE 1376
 
 static void evergreen_gpu_init(struct radeon_device *rdev);
 void evergreen_fini(struct radeon_device *rdev);
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
 
@@ -215 +217 @@
 		}
 	}
 }
 
 #endif
+/* watermark setup */
+
+static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev,
+					struct radeon_crtc *radeon_crtc,
+					struct drm_display_mode *mode,
+					struct drm_display_mode *other_mode)
+{
+	u32 tmp;
+	/*
+	 * Line Buffer Setup
+	 * There are 3 line buffers, each one shared by 2 display controllers.
+	 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
+	 * the display controllers.  The paritioning is done via one of four
+	 * preset allocations specified in bits 2:0:
+	 * first display controller
+	 *  0 - first half of lb (3840 * 2)
+	 *  1 - first 3/4 of lb (5760 * 2)
+	 *  2 - whole lb (7680 * 2), other crtc must be disabled
+	 *  3 - first 1/4 of lb (1920 * 2)
+	 * second display controller
+	 *  4 - second half of lb (3840 * 2)
+	 *  5 - second 3/4 of lb (5760 * 2)
+	 *  6 - whole lb (7680 * 2), other crtc must be disabled
+	 *  7 - last 1/4 of lb (1920 * 2)
+	 */
+	/* this can get tricky if we have two large displays on a paired group
+	 * of crtcs.  Ideally for multiple large displays we'd assign them to
+	 * non-linked crtcs for maximum line buffer allocation.
+	 */
+	if (radeon_crtc->base.enabled && mode) {
+		if (other_mode)
+			tmp = 0; /* 1/2 */
+		else
+			tmp = 2; /* whole */
+	} else
+		tmp = 0;
+
+	/* second controller of the pair uses second half of the lb */
+	if (radeon_crtc->crtc_id % 2)
+		tmp += 4;
+	WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp);
+
+	if (radeon_crtc->base.enabled && mode) {
+		switch (tmp) {
+		case 0:
+		case 4:
+		default:
+			if (ASIC_IS_DCE5(rdev))
+				return 4096 * 2;
+			else
+				return 3840 * 2;
+		case 1:
+		case 5:
+			if (ASIC_IS_DCE5(rdev))
+				return 6144 * 2;
+			else
+				return 5760 * 2;
+		case 2:
+		case 6:
+			if (ASIC_IS_DCE5(rdev))
+				return 8192 * 2;
+			else
+				return 7680 * 2;
+		case 3:
+		case 7:
+			if (ASIC_IS_DCE5(rdev))
+				return 2048 * 2;
+			else
+				return 1920 * 2;
+		}
+	}
+
+	/* controller not enabled, so no lb used */
+	return 0;
+}
+
+static u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev)
+{
+	u32 tmp = RREG32(MC_SHARED_CHMAP);
+
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	default:
+		return 1;
+	case 1:
+		return 2;
+	case 2:
+		return 4;
+	case 3:
+		return 8;
+	}
+}
+
+struct evergreen_wm_params {
+	u32 dram_channels; /* number of dram channels */
+	u32 yclk;          /* bandwidth per dram data pin in kHz */
+	u32 sclk;          /* engine clock in kHz */
+	u32 disp_clk;      /* display clock in kHz */
+	u32 src_width;     /* viewport width */
+	u32 active_time;   /* active display time in ns */
+	u32 blank_time;    /* blank time in ns */
+	bool interlaced;    /* mode is interlaced */
+	fixed20_12 vsc;    /* vertical scale ratio */
+	u32 num_heads;     /* number of active crtcs */
+	u32 bytes_per_pixel; /* bytes per pixel display + overlay */
+	u32 lb_size;       /* line buffer allocated to pipe */
+	u32 vtaps;         /* vertical scaler taps */
+};
+
+static u32 evergreen_dram_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 dram_efficiency; /* 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	dram_efficiency.full = dfixed_const(7);
+	dram_efficiency.full = dfixed_div(dram_efficiency, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	/* Calculate DRAM Bandwidth and the part allocated to display. */
+	fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
+	fixed20_12 yclk, dram_channels, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	yclk.full = dfixed_const(wm->yclk);
+	yclk.full = dfixed_div(yclk, a);
+	dram_channels.full = dfixed_const(wm->dram_channels * 4);
+	a.full = dfixed_const(10);
+	disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
+	disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
+	bandwidth.full = dfixed_mul(dram_channels, yclk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_data_return_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display Data return Bandwidth */
+	fixed20_12 return_efficiency; /* 0.8 */
+	fixed20_12 sclk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	sclk.full = dfixed_const(wm->sclk);
+	sclk.full = dfixed_div(sclk, a);
+	a.full = dfixed_const(10);
+	return_efficiency.full = dfixed_const(8);
+	return_efficiency.full = dfixed_div(return_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, sclk);
+	bandwidth.full = dfixed_mul(bandwidth, return_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_dmif_request_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the DMIF Request Bandwidth */
+	fixed20_12 disp_clk_request_efficiency; /* 0.8 */
+	fixed20_12 disp_clk, bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	disp_clk.full = dfixed_const(wm->disp_clk);
+	disp_clk.full = dfixed_div(disp_clk, a);
+	a.full = dfixed_const(10);
+	disp_clk_request_efficiency.full = dfixed_const(8);
+	disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);
+	a.full = dfixed_const(32);
+	bandwidth.full = dfixed_mul(a, disp_clk);
+	bandwidth.full = dfixed_mul(bandwidth, disp_clk_request_efficiency);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
+	u32 dram_bandwidth = evergreen_dram_bandwidth(wm);
+	u32 data_return_bandwidth = evergreen_data_return_bandwidth(wm);
+	u32 dmif_req_bandwidth = evergreen_dmif_request_bandwidth(wm);
+
+	return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
+}
+
+static u32 evergreen_average_bandwidth(struct evergreen_wm_params *wm)
+{
+	/* Calculate the display mode Average Bandwidth
+	 * DisplayMode should contain the source and destination dimensions,
+	 * timing, etc.
+	 */
+	fixed20_12 bpp;
+	fixed20_12 line_time;
+	fixed20_12 src_width;
+	fixed20_12 bandwidth;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1000);
+	line_time.full = dfixed_const(wm->active_time + wm->blank_time);
+	line_time.full = dfixed_div(line_time, a);
+	bpp.full = dfixed_const(wm->bytes_per_pixel);
+	src_width.full = dfixed_const(wm->src_width);
+	bandwidth.full = dfixed_mul(src_width, bpp);
+	bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
+	bandwidth.full = dfixed_div(bandwidth, line_time);
+
+	return dfixed_trunc(bandwidth);
+}
+
+static u32 evergreen_latency_watermark(struct evergreen_wm_params *wm)
+{
+	/* First calcualte the latency in ns */
+	u32 mc_latency = 2000; /* 2000 ns. */
+	u32 available_bandwidth = evergreen_available_bandwidth(wm);
+	u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
+	u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
+	u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
+	u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
+		(wm->num_heads * cursor_line_pair_return_time);
+	u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
+	u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
+	fixed20_12 a, b, c;
+
+	if (wm->num_heads == 0)
+		return 0;
+
+	a.full = dfixed_const(2);
+	b.full = dfixed_const(1);
+	if ((wm->vsc.full > a.full) ||
+	    ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
+	    (wm->vtaps >= 5) ||
+	    ((wm->vsc.full >= a.full) && wm->interlaced))
+		max_src_lines_per_dst_line = 4;
+	else
+		max_src_lines_per_dst_line = 2;
+
+	a.full = dfixed_const(available_bandwidth);
+	b.full = dfixed_const(wm->num_heads);
+	a.full = dfixed_div(a, b);
+
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(wm->disp_clk);
+	b.full = dfixed_div(c, b);
+	c.full = dfixed_const(wm->bytes_per_pixel);
+	b.full = dfixed_mul(b, c);
+
+	lb_fill_bw = min(dfixed_trunc(a), dfixed_trunc(b));
+
+	a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
+	b.full = dfixed_const(1000);
+	c.full = dfixed_const(lb_fill_bw);
+	b.full = dfixed_div(c, b);
+	a.full = dfixed_div(a, b);
+	line_fill_time = dfixed_trunc(a);
+
+	if (line_fill_time < wm->active_time)
+		return latency;
+	else
+		return latency + (line_fill_time - wm->active_time);
+
+}
+
+static bool evergreen_average_bandwidth_vs_dram_bandwidth_for_display(struct evergreen_wm_params *wm)
+{
+	if (evergreen_average_bandwidth(wm) <=
+	    (evergreen_dram_bandwidth_for_display(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+};
+
+static bool evergreen_average_bandwidth_vs_available_bandwidth(struct evergreen_wm_params *wm)
+{
+	if (evergreen_average_bandwidth(wm) <=
+	    (evergreen_available_bandwidth(wm) / wm->num_heads))
+		return true;
+	else
+		return false;
+};
+
+static bool evergreen_check_latency_hiding(struct evergreen_wm_params *wm)
+{
+	u32 lb_partitions = wm->lb_size / wm->src_width;
+	u32 line_time = wm->active_time + wm->blank_time;
+	u32 latency_tolerant_lines;
+	u32 latency_hiding;
+	fixed20_12 a;
+
+	a.full = dfixed_const(1);
+	if (wm->vsc.full > a.full)
+		latency_tolerant_lines = 1;
+	else {
+		if (lb_partitions <= (wm->vtaps + 1))
+			latency_tolerant_lines = 1;
+		else
+			latency_tolerant_lines = 2;
+	}
+
+	latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);
+
+	if (evergreen_latency_watermark(wm) <= latency_hiding)
+		return true;
+	else
+		return false;
+}
+
+static void evergreen_program_watermarks(struct radeon_device *rdev,
+					 struct radeon_crtc *radeon_crtc,
+					 u32 lb_size, u32 num_heads)
+{
+	struct drm_display_mode *mode = &radeon_crtc->base.mode;
+	struct evergreen_wm_params wm;
+	u32 pixel_period;
+	u32 line_time = 0;
+	u32 latency_watermark_a = 0, latency_watermark_b = 0;
+	u32 priority_a_mark = 0, priority_b_mark = 0;
+	u32 priority_a_cnt = PRIORITY_OFF;
+	u32 priority_b_cnt = PRIORITY_OFF;
+	u32 pipe_offset = radeon_crtc->crtc_id * 16;
+	u32 tmp, arb_control3;
+	fixed20_12 a, b, c;
+
+	if (radeon_crtc->base.enabled && num_heads && mode) {
+		pixel_period = 1000000 / (u32)mode->clock;
+		line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+		priority_a_cnt = 0;
+		priority_b_cnt = 0;
+
+		wm.yclk = rdev->pm.current_mclk * 10;
+		wm.sclk = rdev->pm.current_sclk * 10;
+		wm.disp_clk = mode->clock;
+		wm.src_width = mode->crtc_hdisplay;
+		wm.active_time = mode->crtc_hdisplay * pixel_period;
+		wm.blank_time = line_time - wm.active_time;
+		wm.interlaced = false;
+		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+			wm.interlaced = true;
+		wm.vsc = radeon_crtc->vsc;
+		wm.vtaps = 1;
+		if (radeon_crtc->rmx_type != RMX_OFF)
+			wm.vtaps = 2;
+		wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
+		wm.lb_size = lb_size;
+		wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
+		wm.num_heads = num_heads;
+
+		/* set for high clocks */
+		latency_watermark_a = min(evergreen_latency_watermark(&wm), (u32)65535);
+		/* set for low clocks */
+		/* wm.yclk = low clk; wm.sclk = low clk */
+		latency_watermark_b = min(evergreen_latency_watermark(&wm), (u32)65535);
+
+		/* possibly force display priority to high */
+		/* should really do this at mode validation time... */
+		if (!evergreen_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
+		    !evergreen_average_bandwidth_vs_available_bandwidth(&wm) ||
+		    !evergreen_check_latency_hiding(&wm) ||
+		    (rdev->disp_priority == 2)) {
+			DRM_INFO("force priority to high\n");
+			priority_a_cnt |= PRIORITY_ALWAYS_ON;
+			priority_b_cnt |= PRIORITY_ALWAYS_ON;
+		}
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_a);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_a_mark = dfixed_trunc(c);
+		priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;
+
+		a.full = dfixed_const(1000);
+		b.full = dfixed_const(mode->clock);
+		b.full = dfixed_div(b, a);
+		c.full = dfixed_const(latency_watermark_b);
+		c.full = dfixed_mul(c, b);
+		c.full = dfixed_mul(c, radeon_crtc->hsc);
+		c.full = dfixed_div(c, a);
+		a.full = dfixed_const(16);
+		c.full = dfixed_div(c, a);
+		priority_b_mark = dfixed_trunc(c);
+		priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
+	}
+
+	/* select wm A */
+	arb_control3 = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp = arb_control3;
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(1);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_a) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* select wm B */
+	tmp = RREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset);
+	tmp &= ~LATENCY_WATERMARK_MASK(3);
+	tmp |= LATENCY_WATERMARK_MASK(2);
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, tmp);
+	WREG32(PIPE0_LATENCY_CONTROL + pipe_offset,
+	       (LATENCY_LOW_WATERMARK(latency_watermark_b) |
+		LATENCY_HIGH_WATERMARK(line_time)));
+	/* restore original selection */
+	WREG32(PIPE0_ARBITRATION_CONTROL3 + pipe_offset, arb_control3);
+
+	/* write the priority marks */
+	WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
+	WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
+
+}
 
 void evergreen_bandwidth_update(struct radeon_device *rdev)
 {
-	/* XXX */
+	struct drm_display_mode *mode0 = NULL;
+	struct drm_display_mode *mode1 = NULL;
+	u32 num_heads = 0, lb_size;
+	int i;
+
+	radeon_update_display_priority(rdev);
+
+	for (i = 0; i < rdev->num_crtc; i++) {
+		if (rdev->mode_info.crtcs[i]->base.enabled)
+			num_heads++;
+	}
+	for (i = 0; i < rdev->num_crtc; i += 2) {
+		mode0 = &rdev->mode_info.crtcs[i]->base.mode;
+		mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
+		lb_size = evergreen_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
+		evergreen_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
+	}
 }
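
Note on the arithmetic in the watermark code added above: fixed20_12 is the DRM 20.12 fixed-point type and the dfixed_* helpers come from drm_fixed.h (dfixed_const(A) is A << 12, dfixed_trunc(A) is A.full >> 12), which is how dfixed_const(7)/dfixed_const(10) yields the 0.7 DRAM efficiency factor without floating point. A minimal standalone sketch of the evergreen_dram_bandwidth() calculation, with hypothetical dfx_* helpers standing in for the real macros:

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the dfixed_* helpers in drm_fixed.h */
typedef struct { uint32_t full; } fixed20_12;

static fixed20_12 dfx_const(uint32_t a) { return (fixed20_12){ a << 12 }; }
static fixed20_12 dfx_mul(fixed20_12 a, fixed20_12 b)
{
	/* widen so the intermediate product keeps its fractional bits */
	return (fixed20_12){ (uint32_t)(((uint64_t)a.full * b.full) >> 12) };
}
static fixed20_12 dfx_div(fixed20_12 a, fixed20_12 b)
{
	return (fixed20_12){ (uint32_t)((((uint64_t)a.full) << 12) / b.full) };
}
static uint32_t dfx_trunc(fixed20_12 a) { return a.full >> 12; }

int main(void)
{
	/* mirrors evergreen_dram_bandwidth(): yclk[kHz] / 1000 *
	 * (channels * 4 bytes) * 0.7 efficiency */
	uint32_t yclk_khz = 1000000, dram_channels = 4;
	fixed20_12 yclk = dfx_div(dfx_const(yclk_khz), dfx_const(1000));
	fixed20_12 eff = dfx_div(dfx_const(7), dfx_const(10)); /* ~0.7 */
	fixed20_12 bw = dfx_mul(dfx_const(dram_channels * 4), yclk);

	bw = dfx_mul(bw, eff);
	printf("dram bandwidth ~ %u\n", (unsigned)dfx_trunc(bw));
	return 0;
}

For these sample values the sketch prints roughly 11199, i.e. 16 bytes per clock at ~70% efficiency, matching what the driver's dfixed_* chain would produce.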
@@ -606 +1052 @@
 	/* we need to own VRAM, so turn off the VGA renderer here
 	 * to stop it overwriting our objects */
 	rv515_vga_render_disable(rdev);
 }
 
-#if 0
 /*
  * CP.
  */
+void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+	/* set to DX10/11 mode */
+	radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
+	radeon_ring_write(rdev, 1);
+	/* FIXME: implement */
+	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+	radeon_ring_write(rdev,
+#ifdef __BIG_ENDIAN
+			  (2 << 0) |
+#endif
+			  (ib->gpu_addr & 0xFFFFFFFC));
+	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
+	radeon_ring_write(rdev, ib->length_dw);
+}
 
 static int evergreen_cp_load_microcode(struct radeon_device *rdev)
 {
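
A note on the ring writes in the hunk above: each radeon_ring_write() emits one dword, and PACKET3() builds a PM4 type-3 packet header. A rough sketch of that header layout (mirroring the PACKET3() macro in the r600/evergreen register headers; the field layout is stated from memory, so verify against r600d.h before relying on it):

#include <stdint.h>

/* PM4 type-3 header: bits 31:30 = 3 (packet type), bits 29:16 = payload
 * dword count minus one, bits 15:8 = IT opcode.  So
 * PACKET3(PACKET3_INDIRECT_BUFFER, 2) is followed by three dwords:
 * IB address low, IB address high, IB length.
 */
static uint32_t packet3_header(uint32_t opcode, uint32_t count)
{
	return (3u << 30) | ((count & 0x3fffu) << 16) | ((opcode & 0xffu) << 8);
}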
@@ -928 +1389 @@
 		cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
 	}
 
 	return backend_map;
 }
-#endif
+
+static void evergreen_program_channel_remap(struct radeon_device *rdev)
+{
+	u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp;
+
+	tmp = RREG32(MC_SHARED_CHMAP);
+	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+	case 0:
+	case 1:
+	case 2:
+	case 3:
+	default:
+		/* default mapping */
+		mc_shared_chremap = 0x00fac688;
+		break;
+	}
+
+	switch (rdev->family) {
+	case CHIP_HEMLOCK:
+	case CHIP_CYPRESS:
+	case CHIP_BARTS:
+		tcp_chan_steer_lo = 0x54763210;
+		tcp_chan_steer_hi = 0x0000ba98;
+		break;
+	case CHIP_JUNIPER:
+	case CHIP_REDWOOD:
+	case CHIP_CEDAR:
+	case CHIP_PALM:
+	case CHIP_SUMO:
+	case CHIP_SUMO2:
+	case CHIP_TURKS:
+	case CHIP_CAICOS:
+	default:
+		tcp_chan_steer_lo = 0x76543210;
+		tcp_chan_steer_hi = 0x0000ba98;
+		break;
+	}
+
+	WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo);
+	WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi);
+	WREG32(MC_SHARED_CHREMAP, mc_shared_chremap);
+}
 
 static void evergreen_gpu_init(struct radeon_device *rdev)
 {
@@ -1357 +1859 @@
 		break;
 	case 8:
 		rdev->config.evergreen.tile_config |= (3 << 0);
 		break;
 	}
-	/* num banks is 8 on all fusion asics */
+	/* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
 	if (rdev->flags & RADEON_IS_IGP)
-		rdev->config.evergreen.tile_config |= 8 << 4;
+		rdev->config.evergreen.tile_config |= 1 << 4;
 	else
 		rdev->config.evergreen.tile_config |=
 			((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4;
 	rdev->config.evergreen.tile_config |=
 		((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8;
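
The fusion (IGP) change above is a bug fix, not a cleanup: per the new comment, the bank field of tile_config at bit 4 holds an encoded bank count (0 = 4, 1 = 8, 2 = 16), so the old 8 << 4 stored the raw bank number where the encoding 1 (eight banks) belongs. A hypothetical decoder illustrating the convention:

#include <stdint.h>

/* hypothetical helper: recover the bank count from the encoded field */
static unsigned int tile_config_num_banks(uint32_t tile_config)
{
	return 4u << ((tile_config >> 4) & 0x3); /* 0 -> 4, 1 -> 8, 2 -> 16 */
}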
@@ -1639 +2141 @@
 	return 0;
 }
 
 bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
 {
-	/* FIXME: implement for evergreen */
-	return false;
+	u32 srbm_status;
+	u32 grbm_status;
+	u32 grbm_status_se0, grbm_status_se1;
+	struct r100_gpu_lockup *lockup = &rdev->config.evergreen.lockup;
+	int r;
+
+	srbm_status = RREG32(SRBM_STATUS);
+	grbm_status = RREG32(GRBM_STATUS);
+	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
+	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
+	if (!(grbm_status & GUI_ACTIVE)) {
+		r100_gpu_lockup_update(lockup, &rdev->cp);
+		return false;
+	}
+	/* force CP activities */
+	r = radeon_ring_lock(rdev, 2);
+	if (!r) {
+		/* PACKET2 NOP */
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_write(rdev, 0x80000000);
+		radeon_ring_unlock_commit(rdev);
+	}
+	rdev->cp.rptr = RREG32(CP_RB_RPTR);
+	return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
 }
 
 static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
 {
@@ -1805 +2329 @@
 	if (r)
 		return r;
 #endif
 
 	/* Enable IRQ */
-	r = r600_irq_init(rdev);
-	if (r) {
-		DRM_ERROR("radeon: IH init failed (%d).\n", r);
-		radeon_irq_kms_fini(rdev);
-		return r;
-	}
-//	evergreen_irq_set(rdev);
 
     r = radeon_ring_init(rdev, rdev->cp.ring_size);
 	if (r)
 		return r;
 	r = evergreen_cp_load_microcode(rdev);
 	if (r)
 		return r;
 	r = evergreen_cp_resume(rdev);
 	if (r)
 		return r;
-	/* write back buffer are not vital so don't worry about failure */
-	r600_wb_enable(rdev);
 
 	return 0;
 }
-
-int evergreen_resume(struct radeon_device *rdev)
-{
-	int r;
-
-	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
-	 * posting will perform necessary task to bring back GPU into good
-	 * shape.
-	 */
-	/* post card */
-	atom_asic_init(rdev->mode_info.atom_context);
-
-	r = evergreen_startup(rdev);
-	if (r) {
-		DRM_ERROR("r600 startup failed on resume\n");
-		return r;
-	}
-#if 0
-	r = r600_ib_test(rdev);
-	if (r) {
-		DRM_ERROR("radeon: failled testing IB (%d).\n", r);
-		return r;
-	}
-#endif
-	return r;
-
-}
-
-int evergreen_suspend(struct radeon_device *rdev)
-{
-	int r;
-
-	/* FIXME: we should wait for ring to be empty */
-	r700_cp_stop(rdev);
-	rdev->cp.ready = false;
-	r600_wb_disable(rdev);
-	evergreen_pcie_gart_disable(rdev);
-#if 0
-	/* unpin shaders bo */
-	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
-	if (likely(r == 0)) {
-		radeon_bo_unpin(rdev->r600_blit.shader_obj);
-		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
-	}
-#endif
-	return 0;
-}
-
-static bool evergreen_card_posted(struct radeon_device *rdev)
-{
-	u32 reg;
-
-	/* first check CRTCs */
-	reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
-		RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
-	if (reg & EVERGREEN_CRTC_MASTER_EN)
-		return true;
-
-	/* then check MEM_SIZE, in case the crtcs are off */
@@ -1906 +2359 @@
  */
 int evergreen_init(struct radeon_device *rdev)
 {
 	int r;
 
-	r = radeon_dummy_page_init(rdev);
-	if (r)
-		return r;
 	/* This don't do much */
 	r = radeon_gem_init(rdev);
 	if (r)
 		return r;

@@ -1920 +2370 @@
 		if (ASIC_IS_AVIVO(rdev))
 			return -EINVAL;
 	}
 	/* Must be an ATOMBIOS */
 	if (!rdev->is_atom_bios) {
-		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+		dev_err(rdev->dev, "Expecting atombios for evergreen GPU\n");
 		return -EINVAL;
 	}
 	r = radeon_atombios_init(rdev);
 	if (r)
 		return r;
+	/* reset the asic, the gfx blocks are often in a bad state
+	 * after the driver is unloaded or after a resume
+	 */
+	if (radeon_asic_reset(rdev))
+		dev_warn(rdev->dev, "GPU reset failed !\n");
 	/* Post card if necessary */
-	if (!evergreen_card_posted(rdev)) {
+	if (!radeon_card_posted(rdev)) {
 		if (!rdev->bios) {
 			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
 			return -EINVAL;
 		}
 		DRM_INFO("GPU not posted. posting now...\n");

@@ -1942 +2397 @@
 	/* Initialize surface registers */
 	radeon_surface_init(rdev);
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Fence driver */
-//	r = radeon_fence_driver_init(rdev);
-//	if (r)
-//		return r;
     /* initialize AGP */
 	if (rdev->flags & RADEON_IS_AGP) {
 		r = radeon_agp_init(rdev);
 		if (r)
 			radeon_agp_disable(rdev);

@@ -1960 +2412 @@
 	/* Memory manager */
 	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 
-	r = radeon_irq_kms_init(rdev);
-	if (r)
-		return r;
 
 	rdev->cp.ring_obj = NULL;

@@ -1978 +2427 @@
 
 	rdev->accel_working = true;
 	r = evergreen_startup(rdev);
 	if (r) {
 		dev_err(rdev->dev, "disabling GPU acceleration\n");
-		r700_cp_fini(rdev);
-		r600_irq_fini(rdev);
-		radeon_irq_kms_fini(rdev);
-		evergreen_pcie_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 	if (rdev->accel_working) {
-		r = radeon_ib_pool_init(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
-			rdev->accel_working = false;
-		}
-		r = r600_ib_test(rdev);
-		if (r) {
-			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
-			rdev->accel_working = false;
-		}
 	}
 	return 0;
 }
 
-void evergreen_fini(struct radeon_device *rdev)
+static void evergreen_pcie_gen2_enable(struct radeon_device *rdev)
 {
-	/*r600_blit_fini(rdev);*/
-	r700_cp_fini(rdev);
-	r600_irq_fini(rdev);
-	radeon_wb_fini(rdev);
-	radeon_irq_kms_fini(rdev);
-	evergreen_pcie_gart_fini(rdev);
-	radeon_gem_fini(rdev);
-	radeon_fence_driver_fini(rdev);
-	radeon_agp_fini(rdev);
-	radeon_bo_fini(rdev);
-	radeon_atombios_fini(rdev);
-    kfree(rdev->bios);
-	rdev->bios = NULL;
+	u32 link_width_cntl, speed_cntl;
+
+	if (radeon_pcie_gen2 == 0)
+		return;
+
+	if (rdev->flags & RADEON_IS_IGP)
+		return;
+
+	if (!(rdev->flags & RADEON_IS_PCIE))
+		return;
+
+	/* x2 cards have a special sequence */
+	if (ASIC_IS_X2(rdev))
+		return;
+
+	speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+	if ((speed_cntl & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
+	    (speed_cntl & LC_OTHER_SIDE_SUPPORTS_GEN2)) {
+
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_TARGET_LINK_SPEED_OVERRIDE_EN;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl &= ~LC_CLR_FAILED_SPD_CHANGE_CNT;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+		speed_cntl = RREG32_PCIE_P(PCIE_LC_SPEED_CNTL);
+		speed_cntl |= LC_GEN2_EN_STRAP;
+		WREG32_PCIE_P(PCIE_LC_SPEED_CNTL, speed_cntl);
+
+	} else {
+		link_width_cntl = RREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL);
+		/* XXX: only disable it if gen1 bridge vendor == 0x111d or 0x1106 */
+		if (1)
+			link_width_cntl |= LC_UPCONFIGURE_DIS;
+		else
+			link_width_cntl &= ~LC_UPCONFIGURE_DIS;
+		WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);