Subversion Repositories Kolibri OS


Diff of radeon_device.c between Rev 5346 (left-hand line numbers below) and Rev 6104 (right-hand line numbers)
Line 28 ... Line 28:

 //#include <...>
 #include <...>
 #include <...>
 #include <...>
 #include <...>
+#include <...>
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "atom.h"

Line 70 ... Line 71:

 int radeon_deep_color = 0;
 int radeon_use_pflipirq = 2;
 int irq_override = 0;
 int radeon_bapm = -1;
 int radeon_backlight = 0;
+int radeon_auxch = -1;
+int radeon_mst = 0;
 
 extern display_t *os_display;
 extern struct drm_device *main_device;
 extern videomode_t usermode;

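Rev 6104 introduces two new driver options, radeon_auxch (native DisplayPort AUX channel handling) and radeon_mst (DisplayPort MST). In this port they are plain globals that keep their compiled-in defaults, since there is no module-parameter machinery. For orientation only, here is a hedged sketch of how such options are typically exposed in the upstream Linux driver; the parameter names, permissions and description strings are assumptions modelled on upstream, not taken from this revision:

/* Sketch: upstream-style module-parameter plumbing for the two options
 * added above (radeon_auxch, radeon_mst).  In the KolibriOS port the
 * globals simply keep their compiled-in defaults. */
MODULE_PARM_DESC(auxch, "Use native auxch experimental support (1 = enable, 0 = disable, -1 = auto)");
module_param_named(auxch, radeon_auxch, int, 0444);

MODULE_PARM_DESC(mst, "DisplayPort MST (1 = enable, 0 = disable)");
module_param_named(mst, radeon_mst, int, 0444);
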
Line 269 ... Line 272 (whitespace cleanup only; shown once, as in Rev 6104):

 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
    /* FIXME: check this out */
    if (rdev->family < CHIP_R600) {
        int i;

        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
            if (rdev->surface_regs[i].bo)
                radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
            else
                radeon_clear_surface_reg(rdev, i);
        }
        /* enable surfaces */
        WREG32(RADEON_SURFACE_CNTL, 0);
    }
}

/*

Line 296 ... Line 299 (whitespace cleanup only; shown once, as in Rev 6104):

 *
 * Init CP scratch register driver information (r1xx-r5xx)
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
    int i;

    /* FIXME: check this out */
    if (rdev->family < CHIP_R300) {
        rdev->scratch.num_reg = 5;
    } else {
        rdev->scratch.num_reg = 7;
    }
    rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    for (i = 0; i < rdev->scratch.num_reg; i++) {
        rdev->scratch.free[i] = true;
        rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    }
}

/**

Line 529 ... Line 532 (radeon_wb_init: the reserve/pin/map sequence is re-indented to match its enclosing block; code otherwise unchanged, shown as in Rev 6104):

                     &rdev->wb.wb_obj);
        if (r) {
            dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
            return r;
        }
        r = radeon_bo_reserve(rdev->wb.wb_obj, false);
        if (unlikely(r != 0)) {
            radeon_wb_fini(rdev);
            return r;
        }
        r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
                  &rdev->wb.gpu_addr);
        if (r) {
            radeon_bo_unreserve(rdev->wb.wb_obj);
            dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
            radeon_wb_fini(rdev);
            return r;
        }
        r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
        radeon_bo_unreserve(rdev->wb.wb_obj);
        if (r) {
            dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
            radeon_wb_fini(rdev);
            return r;
        }
    }

    /* clear wb memory */
    memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);

Line 560 ... Line 563 (whitespace cleanup only; shown once, as in Rev 6104):

    /* disabled via module param */
    if (radeon_no_wb == 1) {
        rdev->wb.enabled = false;
    } else {
        if (rdev->flags & RADEON_IS_AGP) {
            /* often unreliable on AGP */
            rdev->wb.enabled = false;
        } else if (rdev->family < CHIP_R300) {
            /* often unreliable on pre-r300 */
            rdev->wb.enabled = false;
        } else {
            rdev->wb.enabled = true;
            /* event_write fences are only available on r600+ */
            if (rdev->family >= CHIP_R600) {
                rdev->wb.use_event = true;
            }
        }
    }
    /* always use writeback/events on NI, APUs */
    if (rdev->family >= CHIP_PALM) {
        rdev->wb.enabled = true;

Line 640 ... Line 643 (whitespace cleanup only; shown once, as in Rev 6104):

    mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
        dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
        mc->real_vram_size = mc->aper_size;
        mc->mc_vram_size = mc->aper_size;
    }
    mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    if (limit && limit < mc->real_vram_size)
        mc->real_vram_size = limit;
    dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
            mc->mc_vram_size >> 20, mc->vram_start,

Line 714 ... Line 717 (whitespace cleanup only; shown once, as in Rev 6104):

                reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
                    RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
            }
            if (rdev->num_crtc >= 6) {
                reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
                    RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
            }
        if (reg & EVERGREEN_CRTC_MASTER_EN)
            return true;
    } else if (ASIC_IS_AVIVO(rdev)) {
        reg = RREG32(AVIVO_D1CRTC_CONTROL) |

Line 761 ... Line 764 (whitespace cleanup only; shown once, as in Rev 6104):

    fixed20_12 a;
    u32 sclk = rdev->pm.current_sclk;
    u32 mclk = rdev->pm.current_mclk;

    /* sclk/mclk in Mhz */
    a.full = dfixed_const(100);
    rdev->pm.sclk.full = dfixed_const(sclk);
    rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
    rdev->pm.mclk.full = dfixed_const(mclk);
    rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

    if (rdev->flags & RADEON_IS_IGP) {
        a.full = dfixed_const(16);

Line 859 ... Line 862 (the AtomBIOS register-accessor callbacks below are whitespace cleanup only; shown once, as in Rev 6104):

 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->pll_rreg(rdev, reg);
    return r;
}

/**

Line 877 ... Line 880:

 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->pll_wreg(rdev, reg, val);
}

/**

Line 893 ... Line 896:

 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->mc_rreg(rdev, reg);
    return r;
}

/**

Line 911 ... Line 914:

 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->mc_wreg(rdev, reg, val);
}

/**

Line 927 ... Line 930:

 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    WREG32(reg*4, val);
}

/**

Line 943 ... Line 946:

 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = RREG32(reg*4);
    return r;
}

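The cail_* functions above are the register-access callbacks that the AtomBIOS interpreter uses to reach back into the driver. As a hedged sketch, they are typically registered in radeon_atombios_init() roughly as follows; the card_info field names and the kzalloc-based setup are assumptions taken from the upstream driver, since the registration code is outside the lines shown here:

/* Sketch (assumed, upstream-style): hand the accessors above to the
 * atom interpreter through a card_info structure. */
struct card_info *atom_card_info =
        kzalloc(sizeof(struct card_info), GFP_KERNEL);

if (!atom_card_info)
        return -ENOMEM;

rdev->mode_info.atom_card_info = atom_card_info;
atom_card_info->dev = rdev->ddev;
atom_card_info->reg_read = cail_reg_read;   /* MMIO space */
atom_card_info->reg_write = cail_reg_write;
atom_card_info->mc_read = cail_mc_read;     /* memory controller */
atom_card_info->mc_write = cail_mc_write;
atom_card_info->pll_read = cail_pll_read;   /* PLL registers */
atom_card_info->pll_write = cail_pll_write;
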
Line 1028 ... Line 1031 (whitespace cleanup only; shown once, as in Rev 6104):

        return -ENOMEM;
    }

    mutex_init(&rdev->mode_info.atom_context->mutex);
    mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
    atom_allocate_fb_scratch(rdev->mode_info.atom_context);
    return 0;
}

/**

Line 1122 ... Line 1125 (a new helper, radeon_gart_size_auto(), is added and radeon_check_arguments() now uses it):

 {
     return (arg & (arg - 1)) == 0;
 }
 
+/**
+ * Determine a sensible default GART size according to ASIC family.
+ *
+ * @family ASIC family name
+ */
+static int radeon_gart_size_auto(enum radeon_family family)
+{
+    /* default to a larger gart size on newer asics */
+    if (family >= CHIP_TAHITI)
+        return 2048;
+    else if (family >= CHIP_RV770)
+        return 1024;
+    else
+        return 512;
+}
+
 /**
  * radeon_check_arguments - validate module params
  *
  * @rdev: radeon_device pointer
  *

Line 1139 ... Line 1158:

                 radeon_vram_limit);
         radeon_vram_limit = 0;
     }
 
     if (radeon_gart_size == -1) {
-        /* default to a larger gart size on newer asics */
-        if (rdev->family >= CHIP_RV770)
-            radeon_gart_size = 1024;
-        else
-            radeon_gart_size = 512;
+        radeon_gart_size = radeon_gart_size_auto(rdev->family);
     }
     /* gtt size must be power of two and greater or equal to 32M */
     if (radeon_gart_size < 32) {
         dev_warn(rdev->dev, "gart size (%d) too small\n",
                 radeon_gart_size);
-        if (rdev->family >= CHIP_RV770)
-            radeon_gart_size = 1024;
-        else
-        radeon_gart_size = 512;
+        radeon_gart_size = radeon_gart_size_auto(rdev->family);
     } else if (!radeon_check_pot_argument(radeon_gart_size)) {
         dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                 radeon_gart_size);
-        if (rdev->family >= CHIP_RV770)
-            radeon_gart_size = 1024;
-        else
-        radeon_gart_size = 512;
+        radeon_gart_size = radeon_gart_size_auto(rdev->family);
     }
 
     rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
 
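Net effect of the hunk above: any GART size request that is -1, below 32 MB, or not a power of two now falls back to the family-based default from radeon_gart_size_auto(). A small self-contained illustration follows; the two function bodies are verbatim from Rev 6104, while the enum stub, the assumed radeon_check_pot_argument() signature, and the sample values exist only for this demo:

#include <stdio.h>
#include <stdbool.h>

/* Stub: only the ordering of these values matters for the demo. */
enum radeon_family { CHIP_R300 = 1, CHIP_RV770 = 2, CHIP_TAHITI = 3 };

static bool radeon_check_pot_argument(int arg)
{
    return (arg & (arg - 1)) == 0;
}

static int radeon_gart_size_auto(enum radeon_family family)
{
    /* default to a larger gart size on newer asics */
    if (family >= CHIP_TAHITI)
        return 2048;
    else if (family >= CHIP_RV770)
        return 1024;
    else
        return 512;
}

int main(void)
{
    int requests[] = { -1, 16, 48, 1024 };   /* hypothetical module-param values */
    enum radeon_family family = CHIP_TAHITI;

    for (int i = 0; i < 4; i++) {
        int gart = requests[i];
        /* same fallback conditions as radeon_check_arguments() above */
        if (gart == -1 || gart < 32 || !radeon_check_pot_argument(gart))
            gart = radeon_gart_size_auto(family);  /* 2048 on TAHITI and newer */
        printf("requested %5d MB -> using %4d MB\n", requests[i], gart);
    }
    return 0;
}
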
Line 1242 ... Line 1251 (radeon_device_init: indentation cleanup throughout; substantive changes are marked with -/+ in the hunks below):

  * Initializes the driver info and hw (all asics).
  * Returns 0 for success or an error on failure.
  * Called at driver startup.
  */
 int radeon_device_init(struct radeon_device *rdev,
                struct drm_device *ddev,
                struct pci_dev *pdev,
                uint32_t flags)
 {
     int r, i;
     int dma_bits;
     bool runtime = false;
 
     rdev->shutdown = false;
     rdev->dev = &pdev->dev;
     rdev->ddev = ddev;
     rdev->pdev = pdev;
     rdev->flags = flags;
     rdev->family = flags & RADEON_FAMILY_MASK;
     rdev->is_atom_bios = false;
     rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
     rdev->mc.gtt_size = 512 * 1024 * 1024;
     rdev->accel_working = false;
     /* set up ring ids */
     for (i = 0; i < RADEON_NUM_RINGS; i++) {

Line 1270 ... Line 1279:

 
     DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
         radeon_family_name[rdev->family], pdev->vendor, pdev->device,
         pdev->subsystem_vendor, pdev->subsystem_device);
 
     /* mutex initialization are all done here so we
      * can recall function without having locking issues */
     mutex_init(&rdev->ring_lock);
     mutex_init(&rdev->dc_hw_i2c_mutex);
     atomic_set(&rdev->ih.lock, 0);

Line 1310 ... Line 1319:

         rdev->flags &= ~RADEON_IS_AGP;
     }
 
     if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
         radeon_agp_disable(rdev);
     }
 
     /* Set the internal MC address mask
      * This is the max address of the GPU's

Line 1338 ... Line 1347:

         (rdev->family <= CHIP_RS740))
         rdev->need_dma32 = true;
 
     dma_bits = rdev->need_dma32 ? 32 : 40;
     r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
     if (r) {
         rdev->need_dma32 = true;
         dma_bits = 32;
         printk(KERN_WARNING "radeon: No suitable DMA available.\n");
     }
 
     /* Registers mapping */
     /* TODO: block userspace mapping of io register */
     spin_lock_init(&rdev->mmio_idx_lock);
     spin_lock_init(&rdev->smc_idx_lock);
     spin_lock_init(&rdev->pll_idx_lock);

Line 1362 ... Line 1371:

     spin_lock_init(&rdev->end_idx_lock);
     if (rdev->family >= CHIP_BONAIRE) {
         rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
         rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
     } else {
         rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
         rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
     }
     rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
     if (rdev->rmmio == NULL) {
         return -ENOMEM;
     }
     DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
     DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
 
     /* doorbell bar mapping */
     if (rdev->family >= CHIP_BONAIRE)

Line 1394 ... Line 1403:

     if (rdev->flags & RADEON_IS_PX)
         runtime = true;
 
     r = radeon_init(rdev);
     if (r)
-        return r;
+        goto failed;
 

Line 1407 ... Line 1416:

         radeon_asic_reset(rdev);
         radeon_fini(rdev);
         radeon_agp_disable(rdev);
         r = radeon_init(rdev);
         if (r)
-            return r;
+            goto failed;
     }
 
 //   r = radeon_ib_ring_tests(rdev);
 //   if (r)

Line 1426 ... Line 1435:

         if (rdev->accel_working)
             radeon_test_syncing(rdev);
         else
             DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
     }
     if (radeon_benchmarking) {
         if (rdev->accel_working)
             radeon_benchmark(rdev, radeon_benchmarking);
         else
             DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
     }
     return 0;
+
+failed:
+    return r;
 }
 
 /**
  * radeon_gpu_reset - reset the asic

Line 1613 ... Line 1625:

 
 static struct pci_device_id pciidlist[] = {
     radeon_PCI_IDS
 };
 
+u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
+int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+int radeon_get_vblank_timestamp_kms(struct drm_device *dev, unsigned int pipe,
+                    int *max_error,
+                    struct timeval *vblank_time,
+                    unsigned flags);
+void radeon_gem_object_free(struct drm_gem_object *obj);
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
 int radeon_driver_irq_postinstall_kms(struct drm_device *dev);

Line 1630 ... Line 1650:

 //    .open = radeon_driver_open_kms,
 //    .preclose = radeon_driver_preclose_kms,
 //    .postclose = radeon_driver_postclose_kms,
 //    .lastclose = radeon_driver_lastclose_kms,
 //    .unload = radeon_driver_unload_kms,
-//    .get_vblank_counter = radeon_get_vblank_counter_kms,
-//    .enable_vblank = radeon_enable_vblank_kms,
-//    .disable_vblank = radeon_disable_vblank_kms,
-//    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
-//    .get_scanout_position = radeon_get_crtc_scanoutpos,
+    .get_vblank_counter = radeon_get_vblank_counter_kms,
+    .enable_vblank = radeon_enable_vblank_kms,
+    .disable_vblank = radeon_disable_vblank_kms,
+    .get_vblank_timestamp = radeon_get_vblank_timestamp_kms,
+    .get_scanout_position = radeon_get_crtc_scanoutpos,
 #if defined(CONFIG_DEBUG_FS)
     .debugfs_init = radeon_debugfs_init,
     .debugfs_cleanup = radeon_debugfs_cleanup,
 #endif
     .irq_preinstall = radeon_driver_irq_preinstall_kms,
     .irq_postinstall = radeon_driver_irq_postinstall_kms,
     .irq_uninstall = radeon_driver_irq_uninstall_kms,
     .irq_handler = radeon_driver_irq_handler_kms,
 //    .ioctls = radeon_ioctls_kms,
-//    .gem_free_object = radeon_gem_object_free,
+    .gem_free_object = radeon_gem_object_free,
 //    .gem_open_object = radeon_gem_object_open,
 //    .gem_close_object = radeon_gem_object_close,
 //    .dumb_create = radeon_mode_dumb_create,
 //    .dumb_map_offset = radeon_mode_dumb_mmap,
 //    .dumb_destroy = drm_gem_dumb_destroy,