Subversion Repositories Kolibri OS


Diff between Rev 5271 and Rev 6104. In the hunks below, lines prefixed with '-' appear only in Rev 5271, lines prefixed with '+' only in Rev 6104; all other lines are unchanged context.
Line 127... Line 127...
 	 * wait for another frame.
 	 */
 	while (r100_is_in_vblank(rdev, crtc)) {
 		if (i++ % 100 == 0) {
 			if (!r100_is_counter_moving(rdev, crtc))
-					break;
-			}
+				break;
 		}
+	}
 
 	while (!r100_is_in_vblank(rdev, crtc)) {
 		if (i++ % 100 == 0) {
 			if (!r100_is_counter_moving(rdev, crtc))
-					break;
+				break;
 		}
 	}
 }
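
Aside: the two loops above poll the vblank status and, every 100 iterations, check that the display counter is still advancing so a hung pipe cannot spin the CPU forever. A minimal self-contained sketch of that stall-guarded polling pattern (plain C with invented stand-ins for r100_is_in_vblank()/r100_is_counter_moving(); this is not the driver's code):

#include <stdbool.h>
#include <stdio.h>

/* Fake "hardware" state; stand-ins for the CRTC status the driver reads. */
static int vline_counter;          /* advances while the display scans out */
static bool vblank_active = true;  /* flips to false when vblank ends */

static bool is_in_vblank(void) { return vblank_active; }

static bool is_counter_moving(void)
{
	static int last = -1;
	bool moving = (vline_counter != last);

	last = vline_counter;
	return moving;
}

int main(void)
{
	int i = 0;

	/* Same shape as the loops in the hunk above: poll the status, and
	 * every 100 iterations make sure the counter still advances so a
	 * stalled display cannot keep us spinning forever. */
	while (is_in_vblank()) {
		if (i++ % 100 == 0) {
			if (!is_counter_moving())
				break;
		}
		/* simulate the hardware making progress */
		vline_counter++;
		if (vline_counter > 200)
			vblank_active = false;
	}
	printf("left vblank after %d polls\n", i);
	return 0;
}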
Line 640... Line 640...
 	}
 	/* Initialize common gart structure */
 	r = radeon_gart_init(rdev);
 	if (r)
 		return r;
-    rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
+	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
 	rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+	rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
 	rdev->asic->gart.set_page = &r100_pci_gart_set_page;
 	return radeon_gart_table_ram_alloc(rdev);
 }
 
Line 679... Line 680...
 	WREG32(RADEON_AIC_CNTL, tmp & ~RADEON_PCIGART_TRANSLATE_EN);
 	WREG32(RADEON_AIC_LO_ADDR, 0);
 	WREG32(RADEON_AIC_HI_ADDR, 0);
 }
+
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+	return addr;
+}
 
 void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
-			    uint64_t addr, uint32_t flags)
+			    uint64_t entry)
 {
 	u32 *gtt = rdev->gart.ptr;
-	gtt[i] = cpu_to_le32(lower_32_bits(addr));
+	gtt[i] = cpu_to_le32(lower_32_bits(entry));
 }
 
 void r100_pci_gart_fini(struct radeon_device *rdev)
 {
 	radeon_gart_fini(rdev);
-		r100_pci_gart_disable(rdev);
+	r100_pci_gart_disable(rdev);
 	radeon_gart_table_ram_free(rdev);
 }
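
The change above (new in Rev 6104) splits GART page programming in two: r100_pci_gart_get_page_entry() converts a DMA address plus flags into the raw table entry (an identity mapping on this PCI GART), while r100_pci_gart_set_page() only stores an entry that was already computed. A rough standalone sketch of that two-step idea, with hypothetical names and a plain array standing in for the real table:

#include <stdint.h>
#include <stdio.h>

#define GART_PAGES 8

static uint32_t gart_table[GART_PAGES];   /* stand-in for rdev->gart.ptr */

/* Step 1: translate (address, flags) into the raw entry format. */
static uint64_t gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	(void)flags;          /* the r100 PCI GART ignores the flags */
	return addr;          /* identity: the entry is just the bus address */
}

/* Step 2: write an already-computed entry into one table slot. */
static void gart_set_page(unsigned int i, uint64_t entry)
{
	gart_table[i] = (uint32_t)entry;   /* low 32 bits, as in the driver */
}

int main(void)
{
	/* Common GART code can now precompute entries once and replay them
	 * cheaply, instead of redoing the conversion on every write. */
	for (unsigned int i = 0; i < GART_PAGES; i++) {
		uint64_t entry = gart_get_page_entry(0x100000ull + i * 4096, 0);

		gart_set_page(i, entry);
	}
	printf("slot 3 = 0x%08x\n", gart_table[3]);
	return 0;
}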
Line 720... Line 726...
 	}
 	if (rdev->irq.hpd[1]) {
 		tmp |= RADEON_FP2_DETECT_MASK;
 	}
 	WREG32(RADEON_GEN_INT_CNTL, tmp);
+
+	/* read back to post the write */
+	RREG32(RADEON_GEN_INT_CNTL);
+
 	return 0;
 }
 
 void r100_irq_disable(struct radeon_device *rdev)
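
The RREG32(RADEON_GEN_INT_CNTL) added after the write is the usual way to flush a posted MMIO write: a read of the same register cannot complete until the preceding write has actually reached the device. A hedged sketch of the pattern with made-up accessors (not the driver's WREG32/RREG32 implementation):

#include <stdint.h>

/* Pretend MMIO accessors backed by an array; the real driver talks to
 * memory-mapped hardware registers instead. */
static volatile uint32_t fake_regs[256];

static void     reg_write(unsigned int r, uint32_t v) { fake_regs[r] = v; }
static uint32_t reg_read(unsigned int r)              { return fake_regs[r]; }

#define GEN_INT_CNTL 0x40   /* hypothetical register offset */

static void enable_interrupts(uint32_t mask)
{
	reg_write(GEN_INT_CNTL, mask);

	/* Read back to "post" the write: on real PCI hardware the read
	 * forces earlier writes out of the bus write buffers before we
	 * return and start expecting interrupts. */
	(void)reg_read(GEN_INT_CNTL);
}

int main(void)
{
	enable_interrupts(0x1);
	return 0;
}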
Line 767... Line 777...
 			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
 		}
 		/* Vertical blank interrupts */
 		if (status & RADEON_CRTC_VBLANK_STAT) {
 			if (rdev->irq.crtc_vblank_int[0]) {
-//				drm_handle_vblank(rdev->ddev, 0);
+				drm_handle_vblank(rdev->ddev, 0);
 				rdev->pm.vblank_sync = true;
-//				wake_up(&rdev->irq.vblank_queue);
+				wake_up(&rdev->irq.vblank_queue);
 			}
-//			if (rdev->irq.pflip[0])
-//				radeon_crtc_handle_flip(rdev, 0);
+			if (atomic_read(&rdev->irq.pflip[0]))
+				radeon_crtc_handle_vblank(rdev, 0);
 		}
 		if (status & RADEON_CRTC2_VBLANK_STAT) {
 			if (rdev->irq.crtc_vblank_int[1]) {
-//				drm_handle_vblank(rdev->ddev, 1);
+				drm_handle_vblank(rdev->ddev, 1);
 				rdev->pm.vblank_sync = true;
-//				wake_up(&rdev->irq.vblank_queue);
+				wake_up(&rdev->irq.vblank_queue);
 			}
-//			if (rdev->irq.pflip[1])
-//				radeon_crtc_handle_flip(rdev, 1);
+			if (atomic_read(&rdev->irq.pflip[1]))
+				radeon_crtc_handle_vblank(rdev, 1);
 		}
 		if (status & RADEON_FP_DETECT_STAT) {
 			queue_hotplug = true;
 			DRM_DEBUG("HPD1\n");
 		}
Line 868... Line 878...
 	BUG();
 	return false;
 }
 
 struct radeon_fence *r100_copy_blit(struct radeon_device *rdev,
-		   uint64_t src_offset,
-		   uint64_t dst_offset,
-		   unsigned num_gpu_pages,
+				    uint64_t src_offset,
+				    uint64_t dst_offset,
+				    unsigned num_gpu_pages,
 				    struct reservation_object *resv)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
 	struct radeon_fence *fence;
Line 1024... Line 1034...
 		   (rdev->family == CHIP_R580) ||
 		   (rdev->family == CHIP_RV560) ||
 		   (rdev->family == CHIP_RV570)) {
 		DRM_INFO("Loading R500 Microcode\n");
 		fw_name = FIRMWARE_R520;
-		}
+	}
 
 	err = request_firmware(&rdev->me_fw, fw_name, rdev->dev);
-   if (err) {
-       printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
-              fw_name);
+	if (err) {
+		printk(KERN_ERR "radeon_cp: Failed to load firmware \"%s\"\n",
+		       fw_name);
 	} else if (rdev->me_fw->size % 8) {
 		printk(KERN_ERR
 		       "radeon_cp: Bogus length %zu in firmware \"%s\"\n",
 		       rdev->me_fw->size, fw_name);
Line 1550... Line 1560...
 	ib = p->ib.ptr;
 	track = (struct r100_cs_track *)p->track;
 
 	idx_value = radeon_get_ib_value(p, idx);
 
-		switch (reg) {
-		case RADEON_CRTC_GUI_TRIG_VLINE:
-			r = r100_cs_packet_parse_vline(p);
-			if (r) {
-				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-						idx, reg);
+	switch (reg) {
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
-				return r;
-			}
-			break;
+			return r;
+		}
+		break;
 		/* FIXME: only allow PACKET3 blit? easier to check for out of
 		 * range access */
-		case RADEON_DST_PITCH_OFFSET:
-		case RADEON_SRC_PITCH_OFFSET:
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
 		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
 		if (r)
 			return r;
 		break;
 	case RADEON_RB3D_DEPTHOFFSET:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
-			if (r) {
-				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-					  idx, reg);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
-				return r;
-			}
+			return r;
+		}
 		track->zb.robj = reloc->robj;
 		track->zb.offset = idx_value;
 		track->zb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
-			break;
-		case RADEON_RB3D_COLOROFFSET:
+		break;
+	case RADEON_RB3D_COLOROFFSET:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
Line 1594... Line 1604...
 		track->cb[0].robj = reloc->robj;
 		track->cb[0].offset = idx_value;
 		track->cb_dirty = true;
 		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		break;
-		case RADEON_PP_TXOFFSET_0:
-		case RADEON_PP_TXOFFSET_1:
-		case RADEON_PP_TXOFFSET_2:
+	case RADEON_PP_TXOFFSET_0:
+	case RADEON_PP_TXOFFSET_1:
+	case RADEON_PP_TXOFFSET_2:
 		i = (reg - RADEON_PP_TXOFFSET_0) / 24;
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
Line 1649... Line 1659...
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
 			return r;
-			}
+		}
 		track->textures[1].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[1].cube_info[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
Line 1662... Line 1672...
 	case RADEON_PP_CUBIC_OFFSET_T2_2:
 	case RADEON_PP_CUBIC_OFFSET_T2_3:
 	case RADEON_PP_CUBIC_OFFSET_T2_4:
 		i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4;
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
-			if (r) {
-				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-					  idx, reg);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
-				return r;
-			}
+			return r;
+		}
 		track->textures[2].cube_info[i].offset = idx_value;
 		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
 		track->textures[2].cube_info[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
 	case RADEON_RE_WIDTH_HEIGHT:
 		track->maxy = ((idx_value >> 16) & 0x7FF);
 		track->cb_dirty = true;
 		track->zb_dirty = true;
-			break;
-		case RADEON_RB3D_COLORPITCH:
+		break;
+	case RADEON_RB3D_COLORPITCH:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
-			if (r) {
-				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-					  idx, reg);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
-				return r;
-			}
+			return r;
+		}
 		if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
 			if (reloc->tiling_flags & RADEON_TILING_MACRO)
 				tile_flags |= RADEON_COLOR_TILE_ENABLE;
 			if (reloc->tiling_flags & RADEON_TILING_MICRO)
 				tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
Line 1696... Line 1706...
 
-		tmp = idx_value & ~(0x7 << 16);
+			tmp = idx_value & ~(0x7 << 16);
 			tmp |= tile_flags;
 			ib[idx] = tmp;
 		} else
Line 1748... Line 1758...
 			break;
 		default:
 			break;
 		}
 		track->zb_dirty = true;
-			break;
-		case RADEON_RB3D_ZPASS_ADDR:
+		break;
+	case RADEON_RB3D_ZPASS_ADDR:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
-			if (r) {
-				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-					  idx, reg);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
 			radeon_cs_dump_packet(p, pkt);
-				return r;
-			}
+			return r;
+		}
 		ib[idx] = idx_value + ((u32)reloc->gpu_offset);
-			break;
+		break;
 	case RADEON_PP_CNTL:
 		{
 			uint32_t temp = idx_value >> 4;
 			for (i = 0; i < track->num_texture; i++)
 				track->textures[i].enabled = !!(temp & (1 << i));
 			track->tex_dirty = true;
 		}
-			break;
+		break;
 	case RADEON_SE_VF_CNTL:
 		track->vap_vf_cntl = idx_value;
 		break;
 	case RADEON_SE_VTX_FMT:
 		track->vtx_size = r100_get_vtx_size(idx_value);
Line 1908... Line 1918...
 	track = (struct r100_cs_track *)p->track;
 	switch (pkt->opcode) {
 	case PACKET3_3D_LOAD_VBPNTR:
 		r = r100_packet3_load_vbpntr(p, pkt, idx);
 		if (r)
-				return r;
+			return r;
 		break;
 	case PACKET3_INDX_BUFFER:
 		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
 		if (r) {
 			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
Line 2032... Line 2042...
 			return r;
 		}
 		p->idx += pkt.count + 2;
 		switch (pkt.type) {
 		case RADEON_PACKET_TYPE0:
-				if (p->rdev->family >= CHIP_R200)
-					r = r100_cs_parse_packet0(p, &pkt,
-								  p->rdev->config.r100.reg_safe_bm,
-								  p->rdev->config.r100.reg_safe_bm_size,
-								  &r200_packet0_check);
-				else
-					r = r100_cs_parse_packet0(p, &pkt,
-								  p->rdev->config.r100.reg_safe_bm,
-								  p->rdev->config.r100.reg_safe_bm_size,
-								  &r100_packet0_check);
-				break;
+			if (p->rdev->family >= CHIP_R200)
+				r = r100_cs_parse_packet0(p, &pkt,
+					p->rdev->config.r100.reg_safe_bm,
+					p->rdev->config.r100.reg_safe_bm_size,
+					&r200_packet0_check);
+			else
+				r = r100_cs_parse_packet0(p, &pkt,
+					p->rdev->config.r100.reg_safe_bm,
+					p->rdev->config.r100.reg_safe_bm_size,
+					&r100_packet0_check);
+			break;
 		case RADEON_PACKET_TYPE2:
-				break;
+			break;
 		case RADEON_PACKET_TYPE3:
-				r = r100_packet3_check(p, &pkt);
-				break;
-			default:
-				DRM_ERROR("Unknown packet type %d !\n",
-					  pkt.type);
-				return -EINVAL;
+			r = r100_packet3_check(p, &pkt);
+			break;
+		default:
+			DRM_ERROR("Unknown packet type %d !\n",
+				  pkt.type);
+			return -EINVAL;
 		}
 		if (r)
 			return r;
 	} while (p->idx < p->chunk_ib->length_dw);
 	return 0;
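
For orientation, the loop above walks an indirect buffer packet by packet: read a header, advance p->idx past the packet, and dispatch on the packet type, rejecting anything unknown. A compact sketch of that dispatch shape over a plain dword array (the header encoding below is invented for illustration and is not the real RADEON packet format):

#include <stdint.h>
#include <stdio.h>

/* Invented encoding: 2-bit type in the top bits, 14-bit count below. */
#define PKT_TYPE(h)  ((h) >> 30)
#define PKT_COUNT(h) (((h) >> 16) & 0x3fff)

static int parse_buffer(const uint32_t *ib, unsigned int len_dw)
{
	unsigned int idx = 0;

	do {
		uint32_t header = ib[idx];
		unsigned int count = PKT_COUNT(header);

		idx += count + 2;   /* header plus payload, mirroring p->idx += pkt.count + 2 */

		switch (PKT_TYPE(header)) {
		case 0:
			printf("type0: register write packet, %u payload dword(s)\n", count + 1);
			break;
		case 2:
			break;      /* type2 is a filler/no-op packet */
		case 3:
			printf("type3: opcode packet, %u payload dword(s)\n", count + 1);
			break;
		default:
			fprintf(stderr, "unknown packet type %u\n", PKT_TYPE(header));
			return -1;
		}
	} while (idx < len_dw);

	return 0;
}

int main(void)
{
	uint32_t ib[] = { 0x00000000, 0x12345678, 0x80000000u };

	return parse_buffer(ib, sizeof(ib) / sizeof(ib[0]));
}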
Line 2509... Line 2519...
 
 	rbbm_status = RREG32(R_000E40_RBBM_STATUS);
 	if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
 		radeon_ring_lockup_update(rdev, ring);
 		return false;
-		}
+	}
 	return radeon_ring_test_lockup(rdev, ring);
 }
 
Line 2783... Line 2793...
 		 */
 		if (rdev->mc.real_vram_size == 0) {
 			rdev->mc.real_vram_size = 8192 * 1024;
 			WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
 		}
-		 /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+		/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
 		 * Novell bug 204882 + along with lots of ubuntu ones
 		 */
 		if (rdev->mc.aper_size > config_aper_size)
 			config_aper_size = rdev->mc.aper_size;
Line 2833... Line 2843...
  * Indirect registers accessor
  */
 void r100_pll_errata_after_index(struct radeon_device *rdev)
 {
 	if (rdev->pll_errata & CHIP_ERRATA_PLL_DUMMYREADS) {
-	(void)RREG32(RADEON_CLOCK_CNTL_DATA);
-	(void)RREG32(RADEON_CRTC_GEN_CNTL);
+		(void)RREG32(RADEON_CLOCK_CNTL_DATA);
+		(void)RREG32(RADEON_CRTC_GEN_CNTL);
 	}
 }
 
 static void r100_pll_errata_after_data(struct radeon_device *rdev)
Line 2946... Line 2956...
 	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
 	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
 	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
 	seq_printf(m, "%u dwords in ring\n", count);
 	if (ring->ready) {
-	for (j = 0; j <= count; j++) {
-		i = (rdp + j) & ring->ptr_mask;
-		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
-	}
+		for (j = 0; j <= count; j++) {
+			i = (rdp + j) & ring->ptr_mask;
+			seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+		}
 	}
 	return 0;
 }
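
The dump above indexes the ring with (rdp + j) & ring->ptr_mask, which works because the ring size is a power of two, so the mask equals size - 1 and the AND is a cheap modulo that wraps around the end of the buffer. A small generic illustration (not tied to the driver's structures):

#include <stdint.h>
#include <stdio.h>

#define RING_DW   16                 /* ring size in dwords; must be a power of two */
#define PTR_MASK  (RING_DW - 1)

int main(void)
{
	uint32_t ring[RING_DW];
	unsigned int rdp = 14;       /* read pointer near the end of the ring */

	for (unsigned int i = 0; i < RING_DW; i++)
		ring[i] = 0x1000 + i;

	/* Walk 4 dwords starting at rdp; the mask wraps index 16 -> 0, 17 -> 1. */
	for (unsigned int j = 0; j < 4; j++) {
		unsigned int i = (rdp + j) & PTR_MASK;

		printf("r[%02u]=0x%08x\n", i, ring[i]);
	}
	return 0;
}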
Line 3201... Line 3211...
 	struct drm_display_mode *mode1 = NULL;
 	struct drm_display_mode *mode2 = NULL;
 	uint32_t pixel_bytes1 = 0;
 	uint32_t pixel_bytes2 = 0;
+
+	/* Guess line buffer size to be 8192 pixels */
+	u32 lb_size = 8192;
 
 	if (!rdev->mode_info.mode_config_initialized)
 		return;
 
 	radeon_update_display_priority(rdev);
 
 	if (rdev->mode_info.crtcs[0]->base.enabled) {
 		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
 		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.primary->fb->bits_per_pixel / 8;
 	}
 	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
-	if (rdev->mode_info.crtcs[1]->base.enabled) {
-		mode2 = &rdev->mode_info.crtcs[1]->base.mode;
+		if (rdev->mode_info.crtcs[1]->base.enabled) {
+			mode2 = &rdev->mode_info.crtcs[1]->base.mode;
 			pixel_bytes2 = rdev->mode_info.crtcs[1]->base.primary->fb->bits_per_pixel / 8;
-	}
+		}
 	}
Line 3615... Line 3628...
 		}
 
 		DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n",
 			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
 	}
+
+	/* Save number of lines the linebuffer leads before the scanout */
+	if (mode1)
+	    rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay);
+
+	if (mode2)
+	    rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay);
 }
 
 int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
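
The lines added at the end of the previous hunk record how many display lines the line buffer leads the scanout by: with the guessed 8192-pixel buffer, DIV_ROUND_UP(8192, crtc_hdisplay) is, for example, ceil(8192 / 1920) = 5 for a 1920-wide mode. A one-liner showing the rounding-up division the macro performs (macro restated here purely for illustration):

#include <stdio.h>

/* Same arithmetic as the kernel's DIV_ROUND_UP(n, d) macro. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int lb_size = 8192;    /* guessed line buffer, in pixels */
	unsigned int hdisplay = 1920;   /* example mode width */

	/* 8192 / 1920 = 4.27, rounded up to 5 lead lines. */
	printf("lead lines = %u\n", DIV_ROUND_UP(lb_size, hdisplay));
	return 0;
}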
Line 3890... Line 3910...
 	}
 
 	r100_irq_set(rdev);
 	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
-   r = r100_cp_init(rdev, 1024 * 1024);
-   if (r) {
+	r = r100_cp_init(rdev, 1024 * 1024);
+	if (r) {
 		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
-       return r;
-   }
+		return r;
+	}
 
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
 		return r;
 	}
 
 	return 0;
 }
+
+void r100_fini(struct radeon_device *rdev)
+{
+	radeon_pm_fini(rdev);
+	r100_cp_fini(rdev);
+	radeon_wb_fini(rdev);
+	radeon_ib_pool_fini(rdev);
+	radeon_gem_fini(rdev);
+	if (rdev->flags & RADEON_IS_PCI)
+		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
+	radeon_irq_kms_fini(rdev);
+	radeon_fence_driver_fini(rdev);
+	radeon_bo_fini(rdev);
+	radeon_atombios_fini(rdev);
+	kfree(rdev->bios);
+	rdev->bios = NULL;
+}
 
 /*
Line 4004... Line 4042...
 	rdev->accel_working = true;
 	r = r100_startup(rdev);
 	if (r) {
 		/* Somethings want wront with the accel init stop accel */
 		dev_err(rdev->dev, "Disabling GPU acceleration\n");
+		r100_cp_fini(rdev);
+		radeon_wb_fini(rdev);
+		radeon_ib_pool_fini(rdev);
+		radeon_irq_kms_fini(rdev);
 		if (rdev->flags & RADEON_IS_PCI)
 			r100_pci_gart_fini(rdev);
 		rdev->accel_working = false;
 	}
 	return 0;
 }
+
+uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg)
+{
+	unsigned long flags;
+	uint32_t ret;
+
+	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+	ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+	return ret;
+}
+
+void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
+	writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
+	writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
+	spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);
+}
 
 u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
 {
 	if (reg < rdev->rio_mem_size)
 		return ioread32(rdev->rio_mem + reg);
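
r100_mm_rreg_slow()/r100_mm_wreg_slow(), added in the new revision, reach registers outside the directly mapped window through an index/data pair: write the register number to MM_INDEX, then access MM_DATA, with a lock held so concurrent callers cannot interleave the two steps. A stripped-down user-space sketch of that access pattern (a mutex and an in-memory array stand in for the spinlock and the hardware; illustrative only):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Fake device: an index register selects which backing word MM_DATA hits. */
static uint32_t backing[1024];
static uint32_t mm_index;
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;

static void     write_index(uint32_t reg) { mm_index = reg; }
static uint32_t read_data(void)           { return backing[mm_index]; }
static void     write_data(uint32_t v)    { backing[mm_index] = v; }

static uint32_t mm_rreg_slow(uint32_t reg)
{
	uint32_t ret;

	pthread_mutex_lock(&idx_lock);    /* the driver uses a spinlock here */
	write_index(reg);
	ret = read_data();
	pthread_mutex_unlock(&idx_lock);
	return ret;
}

static void mm_wreg_slow(uint32_t reg, uint32_t v)
{
	pthread_mutex_lock(&idx_lock);
	write_index(reg);
	write_data(v);
	pthread_mutex_unlock(&idx_lock);
}

int main(void)
{
	mm_wreg_slow(0x200, 0xdeadbeef);
	printf("0x200 = 0x%08x\n", mm_rreg_slow(0x200));
	return 0;
}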