Subversion Repositories Kolibri OS

Rev 1129 → Rev 1179

Each hunk below begins with the viewer's "Line X... Line Y..." marker, giving the first line of the hunk in Rev 1129 and in Rev 1179 respectively. Unprefixed lines are identical in both revisions; lines prefixed with "-" exist only in Rev 1129, and lines prefixed with "+" only in Rev 1179.
Line 23... Line 23...
  *
  * Authors: Dave Airlie
  *          Alex Deucher
  *          Jerome Glisse
  */
-//#include 
+#include 
 #include "drmP.h"
 #include "drm.h"
 #include "radeon_reg.h"
 #include "radeon.h"
+#include "radeon_drm.h"
+
+#include "r300d.h"
+
+#include "r300_reg_safe.h"
Line 33... Line 38...
 
 /* r300,r350,rv350,rv370,rv380 depends on : */
 void r100_hdp_reset(struct radeon_device *rdev);
 int r100_cp_reset(struct radeon_device *rdev);
 int r100_rb2d_reset(struct radeon_device *rdev);
 int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
 int r100_pci_gart_enable(struct radeon_device *rdev);
-void r100_pci_gart_disable(struct radeon_device *rdev);
 void r100_mc_setup(struct radeon_device *rdev);
 void r100_mc_disable_clients(struct radeon_device *rdev);
 int r100_gui_wait_for_idle(struct radeon_device *rdev);
 int r100_cs_packet_parse(struct radeon_cs_parser *p,
 			 struct radeon_cs_packet *pkt,
 			 unsigned idx);
-int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
-			      struct radeon_cs_reloc **cs_reloc);
+int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
 int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 			  struct radeon_cs_packet *pkt,
 			  const unsigned *auth, unsigned n,
 			  radeon_packet0_check_t check);
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
-			 struct radeon_cs_packet *pkt);
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 					 struct radeon_cs_packet *pkt,
Line 57... Line 58...
 					 struct radeon_object *robj);
Line 78... Line 79...
 	for (i = 0; i < 2; i++) {
 		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
 		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
+	}
 		mb();
-	}
+}
+
+int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
+{
+	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
+
+	if (i < 0 || i > rdev->gart.num_gpu_pages) {
+		return -EINVAL;
+	}
+	addr = (lower_32_bits(addr) >> 8) |
+	       ((upper_32_bits(addr) & 0xff) << 24) |
+	       0xc;
+	/* on x86 we want this to be CPU endian, on powerpc
+	 * on powerpc without HW swappers, it'll get swapped on way
+	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
+	writel(addr, ((void __iomem *)ptr) + (i * 4));
+	return 0;
 }
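Note: the rv370_pcie_gart_set_page() added in this hunk packs a 40-bit bus address into a 32-bit GART table entry (bits 39:12 of the page address end up in bits 31:4 of the entry, and 0xc is always ORed into the low flag bits). The following stand-alone sketch only illustrates that packing; the helper name rv370_gart_entry() and the sample address are hypothetical and not part of the driver.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Sketch of the entry encoding used by rv370_pcie_gart_set_page(). */
static uint32_t rv370_gart_entry(uint64_t addr)
{
	return (uint32_t)(((addr & 0xffffffffULL) >> 8) |
			  (((addr >> 32) & 0xffULL) << 24) |
			  0xc);
}

int main(void)
{
	/* a page-aligned bus address just above 4 GiB */
	assert(rv370_gart_entry(0x123456000ULL) == 0x0123456cU);
	printf("entry = 0x%08x\n", rv370_gart_entry(0x123456000ULL));
	return 0;
}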
Line 86... Line 104...
 
-int rv370_pcie_gart_enable(struct radeon_device *rdev)
+int rv370_pcie_gart_init(struct radeon_device *rdev)
 {
-	uint32_t table_addr;
-	uint32_t tmp;
 	int r;
 
+	if (rdev->gart.table.vram.robj) {
+		WARN(1, "RV370 PCIE GART already initialized.\n");
+		return 0;
+	}
 	/* Initialize common gart structure */
 	r = radeon_gart_init(rdev);
-	if (r) {
+	if (r)
 		return r;
-	}
 	r = rv370_debugfs_pcie_gart_info_init(rdev);
-	if (r) {
+	if (r)
 		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
-	}
 	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
-	r = radeon_gart_table_vram_alloc(rdev);
-	if (r) {
-		return r;
-	}
+	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+	return radeon_gart_table_vram_alloc(rdev);
+}
+
+int rv370_pcie_gart_enable(struct radeon_device *rdev)
+{
+	uint32_t table_addr;
+	uint32_t tmp;
+	int r;
+
+	if (rdev->gart.table.vram.robj == NULL) {
+		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+		return -EINVAL;
+	}
+	r = radeon_gart_table_vram_pin(rdev);
+	if (r)
+		return r;
 	/* discard memory request outside of configured range */
 	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
Line 123... Line 155...
 	tmp |= RADEON_PCIE_TX_GART_EN;
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
 	rv370_pcie_gart_tlb_flush(rdev);
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
-		 rdev->mc.gtt_size >> 20, table_addr);
+		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
 	rdev->gart.ready = true;
 	return 0;
 }
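Note: the only change in this hunk is the explicit (unsigned) cast in the DRM_INFO() argument. A small stand-alone illustration of why such a cast matters when the size field is wider than unsigned int; the variable and value below are made up for the example and are not from the driver.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t gtt_size = 256ULL << 20;	/* example: 256 MiB */

	/* mirrors DRM_INFO("PCIE GART of %uM enabled ..."): a 64-bit value
	 * must be shrunk explicitly before being passed to a "%u" format */
	printf("PCIE GART of %uM enabled\n", (unsigned)(gtt_size >> 20));
	return 0;
}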
Line 132... Line 164...
 
Line 141... Line 173...
 //       radeon_object_kunmap(rdev->gart.table.vram.robj);
 //       radeon_object_unpin(rdev->gart.table.vram.robj);
 	}
 }
Line 145... Line 177...
 
-int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
-{
-    void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
-
-    if (i < 0 || i > rdev->gart.num_gpu_pages) {
-        return -EINVAL;
-    }
-	addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC;
-    writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
-    return 0;
-}
-
-int r300_gart_enable(struct radeon_device *rdev)
-{
-#if __OS_HAS_AGP
-	if (rdev->flags & RADEON_IS_AGP) {
-		if (rdev->family > CHIP_RV350) {
-			rv370_pcie_gart_disable(rdev);
-		} else {
-			r100_pci_gart_disable(rdev);
-		}
-		return 0;
-	}
-#endif
-	if (rdev->flags & RADEON_IS_PCIE) {
-		rdev->asic->gart_disable = &rv370_pcie_gart_disable;
-		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
-		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
-		return rv370_pcie_gart_enable(rdev);
-	}
-    return r100_pci_gart_enable(rdev);
-}
-
+void rv370_pcie_gart_fini(struct radeon_device *rdev)
+{
+			rv370_pcie_gart_disable(rdev);
+	radeon_gart_table_vram_free(rdev);
+	radeon_gart_fini(rdev);
+}
 
 /*
  * MC
  */
Line 195... Line 200...
 	}
Line 196... Line 201...
 
 	/* Setup GPU memory space */
 	rdev->mc.vram_location = 0xFFFFFFFFUL;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
-	if (rdev->flags & RADEON_IS_AGP) {
-		r = radeon_agp_init(rdev);
-		if (r) {
-			printk(KERN_WARNING "[drm] Disabling AGP\n");
-			rdev->flags &= ~RADEON_IS_AGP;
-			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-		} else {
-			rdev->mc.gtt_location = rdev->mc.agp_base;
-		}
-	}
 	r = radeon_mc_setup(rdev);
 	if (r) {
 		return r;
Line 222... Line 217...
 	return 0;
 }
Line 224... Line 219...
 
 void r300_mc_fini(struct radeon_device *rdev)
 {
-	if (rdev->flags & RADEON_IS_PCIE) {
-		rv370_pcie_gart_disable(rdev);
-		radeon_gart_table_vram_free(rdev);
-	} else {
-		r100_pci_gart_disable(rdev);
-		radeon_gart_table_ram_free(rdev);
-	}
-	radeon_gart_fini(rdev);
Line 235... Line 222...
 }
 
Line 442... Line 429...
 		rdev->num_gb_pipes = 2;
 	} else {
 		/* rv350,rv370,rv380 */
 		rdev->num_gb_pipes = 1;
 	}
+	rdev->num_z_pipes = 1;
 	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
 	switch (rdev->num_gb_pipes) {
 	case 2:
 		gb_tile_config |= R300_PIPE_COUNT_R300;
 		break;
Line 480... Line 468...
 	}
 	if (r300_mc_wait_for_idle(rdev)) {
 		printk(KERN_WARNING "Failed to wait MC idle while "
 		       "programming pipes. Bad things might happen.\n");
 	}
-	DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes);
+	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
+		 rdev->num_gb_pipes, rdev->num_z_pipes);
 }
Line 487... Line 476...
 
 int r300_ga_reset(struct radeon_device *rdev)
 {
Line 581... Line 570...
 	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
 		rdev->mc.vram_width = 128;
 	} else {
 		rdev->mc.vram_width = 64;
 	}
-	rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 
-	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
-	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+	r100_vram_init_sizes(rdev);
Line 590... Line 577...
 }
 
-
-/*
- * Indirect registers accessor
- */
-uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
-{
-	uint32_t r;
-
-	WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
-	(void)RREG32(RADEON_PCIE_INDEX);
-	r = RREG32(RADEON_PCIE_DATA);
-	return r;
-}
-
-void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
-	WREG8(RADEON_PCIE_INDEX, ((reg) & 0xff));
-	(void)RREG32(RADEON_PCIE_INDEX);
-	WREG32(RADEON_PCIE_DATA, (v));
-	(void)RREG32(RADEON_PCIE_DATA);
-}
 
Line 614... Line 580...
 /*
  * PCIE Lanes
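Note: the rv370_pcie_rreg()/rv370_pcie_wreg() helpers removed two hunks above implement a classic index/data indirect register scheme: write the register number into RADEON_PCIE_INDEX, flush the posted write, then read or write RADEON_PCIE_DATA. The fragment below is only a rough, hypothetical sketch of that pattern in plain Linux MMIO style; the function name is invented, it assumes the RADEON_PCIE_INDEX/RADEON_PCIE_DATA offsets from radeon_reg.h and a <linux/io.h> environment, and the real driver goes through its WREG8/RREG32 macros instead.

#include <linux/io.h>

/* Hypothetical stand-alone version of the index/data access pattern. */
static u32 pcie_indirect_read(void __iomem *mmio, u32 reg)
{
	writeb(reg & 0xff, mmio + RADEON_PCIE_INDEX);	/* select register  */
	(void)readl(mmio + RADEON_PCIE_INDEX);		/* flush the write  */
	return readl(mmio + RADEON_PCIE_DATA);		/* fetch its value  */
}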
Line 720... Line 686...
 
 #if 0
 /*
  * CS functions
  */
-struct r300_cs_track_cb {
-	struct radeon_object	*robj;
-	unsigned		pitch;
-	unsigned		cpp;
-	unsigned		offset;
-};
-
-struct r300_cs_track_array {
-	struct radeon_object	*robj;
-	unsigned		esize;
-};
-
-struct r300_cs_track_texture {
-	struct radeon_object	*robj;
-	unsigned		pitch;
-	unsigned		width;
-	unsigned		height;
-	unsigned		num_levels;
-	unsigned		cpp;
-	unsigned		tex_coord_type;
-	unsigned		txdepth;
-	unsigned		width_11;
-	unsigned		height_11;
-	bool			use_pitch;
-	bool			enabled;
-	bool			roundup_w;
-	bool			roundup_h;
-};
-
-struct r300_cs_track {
-	unsigned			num_cb;
-	unsigned			maxy;
-	unsigned			vtx_size;
-	unsigned			vap_vf_cntl;
-	unsigned			immd_dwords;
-	unsigned			num_arrays;
-	unsigned			max_indx;
-	struct r300_cs_track_array	arrays[11];
-	struct r300_cs_track_cb 	cb[4];
-	struct r300_cs_track_cb 	zb;
-	struct r300_cs_track_texture	textures[16];
-	bool				z_enabled;
-};
-
-static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t)
-{
-	DRM_ERROR("pitch                      %d\n", t->pitch);
-	DRM_ERROR("width                      %d\n", t->width);
-	DRM_ERROR("height                     %d\n", t->height);
-	DRM_ERROR("num levels                 %d\n", t->num_levels);
-	DRM_ERROR("depth                      %d\n", t->txdepth);
-	DRM_ERROR("bpp                        %d\n", t->cpp);
-	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
-	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
-	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
-}
-
-static inline int r300_cs_track_texture_check(struct radeon_device *rdev,
-					      struct r300_cs_track *track)
-{
-	struct radeon_object *robj;
-	unsigned long size;
-	unsigned u, i, w, h;
-
-	for (u = 0; u < 16; u++) {
-		if (!track->textures[u].enabled)
-			continue;
-		robj = track->textures[u].robj;
-		if (robj == NULL) {
-			DRM_ERROR("No texture bound to unit %u\n", u);
-			return -EINVAL;
-		}
-		size = 0;
-		for (i = 0; i <= track->textures[u].num_levels; i++) {
-			if (track->textures[u].use_pitch) {
-				w = track->textures[u].pitch / (1 << i);
-			} else {
-				w = track->textures[u].width / (1 << i);
-				if (rdev->family >= CHIP_RV515)
-					w |= track->textures[u].width_11;
-				if (track->textures[u].roundup_w)
-					w = roundup_pow_of_two(w);
-			}
-			h = track->textures[u].height / (1 << i);
-			if (rdev->family >= CHIP_RV515)
-				h |= track->textures[u].height_11;
-			if (track->textures[u].roundup_h)
-				h = roundup_pow_of_two(h);
-			size += w * h;
-		}
-		size *= track->textures[u].cpp;
-		switch (track->textures[u].tex_coord_type) {
-		case 0:
-			break;
-		case 1:
-			size *= (1 << track->textures[u].txdepth);
-			break;
-		case 2:
-			size *= 6;
-			break;
-		default:
-			DRM_ERROR("Invalid texture coordinate type %u for unit "
-				  "%u\n", track->textures[u].tex_coord_type, u);
-			return -EINVAL;
-		}
-		if (size > radeon_object_size(robj)) {
-			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
-				  "%lu\n", u, size, radeon_object_size(robj));
-			r300_cs_track_texture_print(&track->textures[u]);
-			return -EINVAL;
-		}
-	}
-	return 0;
-}
-
-int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
-{
-	unsigned i;
-	unsigned long size;
-	unsigned prim_walk;
-	unsigned nverts;
-
-	for (i = 0; i < track->num_cb; i++) {
-		if (track->cb[i].robj == NULL) {
-			DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
-			return -EINVAL;
-		}
-		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
-		size += track->cb[i].offset;
-		if (size > radeon_object_size(track->cb[i].robj)) {
-			DRM_ERROR("[drm] Buffer too small for color buffer %d "
-				  "(need %lu have %lu) !\n", i, size,
-				  radeon_object_size(track->cb[i].robj));
-			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
-				  i, track->cb[i].pitch, track->cb[i].cpp,
-				  track->cb[i].offset, track->maxy);
-			return -EINVAL;
-		}
-	}
-	if (track->z_enabled) {
-		if (track->zb.robj == NULL) {
-			DRM_ERROR("[drm] No buffer for z buffer !\n");
-			return -EINVAL;
-		}
-		size = track->zb.pitch * track->zb.cpp * track->maxy;
-		size += track->zb.offset;
-		if (size > radeon_object_size(track->zb.robj)) {
-			DRM_ERROR("[drm] Buffer too small for z buffer "
-				  "(need %lu have %lu) !\n", size,
-				  radeon_object_size(track->zb.robj));
-			return -EINVAL;
-		}
-	}
-	prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
-	nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
-	switch (prim_walk) {
-	case 1:
-		for (i = 0; i < track->num_arrays; i++) {
-			size = track->arrays[i].esize * track->max_indx * 4;
-			if (track->arrays[i].robj == NULL) {
-				DRM_ERROR("(PW %u) Vertex array %u no buffer "
-					  "bound\n", prim_walk, i);
-				return -EINVAL;
-			}
-			if (size > radeon_object_size(track->arrays[i].robj)) {
-				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-					   "have %lu dwords\n", prim_walk, i,
-					   size >> 2,
-					   radeon_object_size(track->arrays[i].robj) >> 2);
-				DRM_ERROR("Max indices %u\n", track->max_indx);
-				return -EINVAL;
-			}
-		}
-		break;
-	case 2:
-		for (i = 0; i < track->num_arrays; i++) {
-			size = track->arrays[i].esize * (nverts - 1) * 4;
-			if (track->arrays[i].robj == NULL) {
-				DRM_ERROR("(PW %u) Vertex array %u no buffer "
-					  "bound\n", prim_walk, i);
-				return -EINVAL;
-			}
-			if (size > radeon_object_size(track->arrays[i].robj)) {
-				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-					   "have %lu dwords\n", prim_walk, i, size >> 2,
-					   radeon_object_size(track->arrays[i].robj) >> 2);
-				return -EINVAL;
-			}
-		}
-		break;
-	case 3:
-		size = track->vtx_size * nverts;
-		if (size != track->immd_dwords) {
-			DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n",
-				  track->immd_dwords, size);
-			DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
-				  nverts, track->vtx_size);
-			return -EINVAL;
-		}
-		break;
-	default:
-		DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
-			  prim_walk);
-		return -EINVAL;
-	}
-	return r300_cs_track_texture_check(rdev, track);
-}
-
-static inline void r300_cs_track_clear(struct r300_cs_track *track)
-{
-	unsigned i;
-
-	track->num_cb = 4;
-	track->maxy = 4096;
-	for (i = 0; i < track->num_cb; i++) {
-		track->cb[i].robj = NULL;
-		track->cb[i].pitch = 8192;
-		track->cb[i].cpp = 16;
-		track->cb[i].offset = 0;
-	}
-	track->z_enabled = true;
-	track->zb.robj = NULL;
-	track->zb.pitch = 8192;
-	track->zb.cpp = 4;
-	track->zb.offset = 0;
-	track->vtx_size = 0x7F;
-	track->immd_dwords = 0xFFFFFFFFUL;
-	track->num_arrays = 11;
-	track->max_indx = 0x00FFFFFFUL;
-	for (i = 0; i < track->num_arrays; i++) {
-		track->arrays[i].robj = NULL;
-		track->arrays[i].esize = 0x7F;
-	}
-	for (i = 0; i < 16; i++) {
-		track->textures[i].pitch = 16536;
-		track->textures[i].width = 16536;
-		track->textures[i].height = 16536;
-		track->textures[i].width_11 = 1 << 11;
-		track->textures[i].height_11 = 1 << 11;
-		track->textures[i].num_levels = 12;
-		track->textures[i].txdepth = 16;
-		track->textures[i].cpp = 64;
-		track->textures[i].tex_coord_type = 1;
-		track->textures[i].robj = NULL;
-		/* CS IB emission code makes sure texture unit are disabled */
-		track->textures[i].enabled = false;
-		track->textures[i].roundup_w = true;
-		track->textures[i].roundup_h = true;
-	}
-}
-
-#endif
-
-static const unsigned r300_reg_safe_bm[159] = {
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
-	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
-	0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
-	0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
-	0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
-	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
-	0x00000000, 0x0000C100, 0x00000000, 0x00000000,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
-	0x00000000, 0x00000000, 0x00000000, 0x00000000,
-	0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
-};
-
-#if 0
-
 static int r300_packet0_check(struct radeon_cs_parser *p,
 		struct radeon_cs_packet *pkt,
 		unsigned idx, unsigned reg)
 {
 	struct radeon_cs_chunk *ib_chunk;
 	struct radeon_cs_reloc *reloc;
-	struct r300_cs_track *track;
+	struct r100_cs_track *track;
 	volatile uint32_t *ib;
-	uint32_t tmp;
+	uint32_t tmp, tile_flags = 0;
 	unsigned i;
Line 1033... Line 701...
 	int r;
 
 	ib = p->ib->ptr;
 	ib_chunk = &p->chunks[p->chunk_ib_idx];
-	track = (struct r300_cs_track*)p->track;
+	track = (struct r100_cs_track *)p->track;
 	switch(reg) {
-	case RADEON_DST_PITCH_OFFSET:
-	case RADEON_SRC_PITCH_OFFSET:
-		r = r100_cs_packet_next_reloc(p, &reloc);
+	case AVIVO_D1MODE_VLINE_START_END:
+	case RADEON_CRTC_GUI_TRIG_VLINE:
+		r = r100_cs_packet_parse_vline(p);
 		if (r) {
 			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
 					idx, reg);
 			r100_cs_dump_packet(p, pkt);
 			return r;
 		}
-		tmp = ib_chunk->kdata[idx] & 0x003fffff;
-		tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
-		ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp;
+		break;
+	case RADEON_DST_PITCH_OFFSET:
+	case RADEON_SRC_PITCH_OFFSET:
+		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
+		if (r)
+			return r;
 		break;
 	case R300_RB3D_COLOROFFSET0:
 	case R300_RB3D_COLOROFFSET1:
Line 1134... Line 805...
 	case 0x4E44:
 		/* RB3D_COLORPITCH0 */
 		/* RB3D_COLORPITCH1 */
 		/* RB3D_COLORPITCH2 */
 		/* RB3D_COLORPITCH3 */
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= R300_COLOR_TILE_ENABLE;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+
+		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+		tmp |= tile_flags;
+		ib[idx] = tmp;
+
 		i = (reg - 0x4E38) >> 2;
 		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
 		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
 		case 9:
 		case 11:
Line 1189... Line 877...
 			return -EINVAL;
 		}
 		break;
 	case 0x4F24:
 		/* ZB_DEPTHPITCH */
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+				  idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+			tile_flags |= R300_DEPTHMICROTILE_TILED;;
+
+		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
+		tmp |= tile_flags;
+		ib[idx] = tmp;
+
 		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
 		break;
 	case 0x4104:
 		for (i = 0; i < 16; i++) {
 			bool enabled;
Line 1220... Line 925...
 		/* TX_FORMAT1_[0-15] */
 		i = (reg - 0x44C0) >> 2;
 		tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
 		track->textures[i].tex_coord_type = tmp;
 		switch ((ib_chunk->kdata[idx] & 0x1F)) {
-		case 0:
-		case 2:
-		case 5:
-		case 18:
-		case 20:
-		case 21:
+		case R300_TX_FORMAT_X8:
+		case R300_TX_FORMAT_Y4X4:
+		case R300_TX_FORMAT_Z3Y3X2:
 			track->textures[i].cpp = 1;
 			break;
-		case 1:
-		case 3:
-		case 6:
-		case 7:
-		case 10:
-		case 11:
-		case 19:
-		case 22:
-		case 24:
+		case R300_TX_FORMAT_X16:
+		case R300_TX_FORMAT_Y8X8:
+		case R300_TX_FORMAT_Z5Y6X5:
+		case R300_TX_FORMAT_Z6Y5X5:
+		case R300_TX_FORMAT_W4Z4Y4X4:
+		case R300_TX_FORMAT_W1Z5Y5X5:
+		case R300_TX_FORMAT_DXT1:
+		case R300_TX_FORMAT_D3DMFT_CxV8U8:
+		case R300_TX_FORMAT_B8G8_B8G8:
+		case R300_TX_FORMAT_G8R8_G8B8:
 			track->textures[i].cpp = 2;
 			break;
-		case 4:
-		case 8:
-		case 9:
-		case 12:
-		case 13:
-		case 23:
-		case 25:
-		case 27:
-		case 30:
+		case R300_TX_FORMAT_Y16X16:
+		case R300_TX_FORMAT_Z11Y11X10:
+		case R300_TX_FORMAT_Z10Y11X11:
+		case R300_TX_FORMAT_W8Z8Y8X8:
+		case R300_TX_FORMAT_W2Z10Y10X10:
+		case 0x17:
+		case R300_TX_FORMAT_FL_I32:
+		case 0x1e:
+		case R300_TX_FORMAT_DXT3:
+		case R300_TX_FORMAT_DXT5:
 			track->textures[i].cpp = 4;
 			break;
-		case 14:
-		case 26:
-		case 28:
+		case R300_TX_FORMAT_W16Z16Y16X16:
+		case R300_TX_FORMAT_FL_R16G16B16A16:
+		case R300_TX_FORMAT_FL_I32A32:
 			track->textures[i].cpp = 8;
 			break;
-		case 29:
+		case R300_TX_FORMAT_FL_R32G32B32A32:
 			track->textures[i].cpp = 16;
 			break;
 		default:
 			DRM_ERROR("Invalid texture format %u\n",
 				  (ib_chunk->kdata[idx] & 0x1F));
Line 1283... Line 987...
 	case 0x4434:
 	case 0x4438:
 	case 0x443C:
 		/* TX_FILTER0_[0-15] */
 		i = (reg - 0x4400) >> 2;
-		tmp = ib_chunk->kdata[idx] & 0x7;;
+		tmp = ib_chunk->kdata[idx] & 0x7;
 		if (tmp == 2 || tmp == 4 || tmp == 6) {
 			track->textures[i].roundup_w = false;
 		}
-		tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;;
+		tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
 		if (tmp == 2 || tmp == 4 || tmp == 6) {
 			track->textures[i].roundup_h = false;
 		}
 		break;
 	case 0x4500:
Line 1348... Line 1052...
 		tmp = ib_chunk->kdata[idx] & (1 << 31);
 		track->textures[i].use_pitch = !!tmp;
 		tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
 		track->textures[i].txdepth = tmp;
 		break;
+	case R300_ZB_ZPASS_ADDR:
+		r = r100_cs_packet_next_reloc(p, &reloc);
+		if (r) {
+			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					idx, reg);
+			r100_cs_dump_packet(p, pkt);
+			return r;
+		}
+		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
+		break;
+	case 0x4be8:
+		/* valid register only on RV530 */
+		if (p->rdev->family == CHIP_RV530)
+			break;
+		/* fallthrough do not move */
 	default:
 		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
 		       reg, idx);
 		return -EINVAL;
 	}
Line 1360... Line 1079...
 
 static int r300_packet3_check(struct radeon_cs_parser *p,
 			      struct radeon_cs_packet *pkt)
 {
 	struct radeon_cs_chunk *ib_chunk;
+
 	struct radeon_cs_reloc *reloc;
-	struct r300_cs_track *track;
+	struct r100_cs_track *track;
 	volatile uint32_t *ib;
 	unsigned idx;
 	unsigned i, c;
Line 1370... Line 1090...
 	int r;
 
 	ib = p->ib->ptr;
 	ib_chunk = &p->chunks[p->chunk_ib_idx];
 	idx = pkt->idx + 1;
-	track = (struct r300_cs_track*)p->track;
+	track = (struct r100_cs_track *)p->track;
 	switch(pkt->opcode) {
 	case PACKET3_3D_LOAD_VBPNTR:
 		c = ib_chunk->kdata[idx++] & 0x1F;
Line 1437... Line 1157...
 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
 			return -EINVAL;
 		}
 		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
 		track->immd_dwords = pkt->count - 1;
-		r = r300_cs_track_check(p->rdev, track);
+		r = r100_cs_track_check(p->rdev, track);
 		if (r) {
 			return r;
 		}
 		break;
 	case PACKET3_3D_DRAW_IMMD_2:
Line 1452... Line 1172...
 			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
 			return -EINVAL;
 		}
 		track->vap_vf_cntl = ib_chunk->kdata[idx];
 		track->immd_dwords = pkt->count;
-		r = r300_cs_track_check(p->rdev, track);
+		r = r100_cs_track_check(p->rdev, track);
 		if (r) {
 			return r;
 		}
 		break;
 	case PACKET3_3D_DRAW_VBUF:
 		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
-		r = r300_cs_track_check(p->rdev, track);
+		r = r100_cs_track_check(p->rdev, track);
 		if (r) {
 			return r;
 		}
 		break;
 	case PACKET3_3D_DRAW_VBUF_2:
 		track->vap_vf_cntl = ib_chunk->kdata[idx];
-		r = r300_cs_track_check(p->rdev, track);
+		r = r100_cs_track_check(p->rdev, track);
 		if (r) {
 			return r;
 		}
 		break;
 	case PACKET3_3D_DRAW_INDX:
 		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
-		r = r300_cs_track_check(p->rdev, track);
+		r = r100_cs_track_check(p->rdev, track);
 		if (r) {
 			return r;
 		}
 		break;
 	case PACKET3_3D_DRAW_INDX_2:
 		track->vap_vf_cntl = ib_chunk->kdata[idx];
-		r = r300_cs_track_check(p->rdev, track);
+		r = r100_cs_track_check(p->rdev, track);
 		if (r) {
 			return r;
 		}
 		break;
 	case PACKET3_NOP:
Line 1497... Line 1217...
 }
Line 1498... Line 1218...
 
 int r300_cs_parse(struct radeon_cs_parser *p)
 {
 	struct radeon_cs_packet pkt;
-	struct r300_cs_track track;
+	struct r100_cs_track *track;
 	int r;
 
-	r300_cs_track_clear(&track);
-	p->track = &track;
+	track = kzalloc(sizeof(*track), GFP_KERNEL);
+	r100_cs_track_clear(p->rdev, track);
+	p->track = track;
 	do {
 		r = r100_cs_packet_parse(p, &pkt, p->idx);
 		if (r) {
Line 1530... Line 1251...
 			return r;
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
 	return 0;
 }
-
 #endif
+
 
-int r300_init(struct radeon_device *rdev)
+void r300_set_reg_safe(struct radeon_device *rdev)
 {
 	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
 	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
+}
+
+int r300_init(struct radeon_device *rdev)
+{
+	r300_set_reg_safe(rdev);
 	return 0;
+}
+
+void r300_mc_program(struct radeon_device *rdev)
+{
+	struct r100_mc_save save;
+	int r;
+
+	r = r100_debugfs_mc_info_init(rdev);
+	if (r) {
+		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
+	}
+
+	/* Stops all mc clients */
+	r100_mc_stop(rdev, &save);
+	if (rdev->flags & RADEON_IS_AGP) {
+		WREG32(R_00014C_MC_AGP_LOCATION,
+			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
+			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
+		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
+		WREG32(R_00015C_AGP_BASE_2,
+			upper_32_bits(rdev->mc.agp_base) & 0xff);
+	} else {
+		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
+		WREG32(R_000170_AGP_BASE, 0);
+		WREG32(R_00015C_AGP_BASE_2, 0);
+	}
+	/* Wait for mc idle */
+	if (r300_mc_wait_for_idle(rdev))
+		DRM_INFO("Failed to wait MC idle before programming MC.\n");
+	/* Program MC, should be a 32bits limited address space */
+	WREG32(R_000148_MC_FB_LOCATION,
+		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |