Subversion Repositories Kolibri OS

--- Rev 5078
+++ Rev 5271
@@ -78,15 +78,16 @@
 	if (vtx_fmt_0 & R200_VTX_N1)
 		vtx_size += 3;
 	return vtx_size;
 }
 
-int r200_copy_dma(struct radeon_device *rdev,
+struct radeon_fence *r200_copy_dma(struct radeon_device *rdev,
 		  uint64_t src_offset,
 		  uint64_t dst_offset,
 		  unsigned num_gpu_pages,
-		  struct radeon_fence **fence)
+				   struct reservation_object *resv)
 {
 	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+	struct radeon_fence *fence;
 	uint32_t size;
 	uint32_t cur_size;
 	int i, num_loops;
@@ -96,11 +97,11 @@
 	size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
 	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
 	r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
+		return ERR_PTR(r);
 	}
 	/* Must wait for 2D idle & clean before DMA or hangs might happen */
 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
 	radeon_ring_write(ring, (1 << 16));
 	for (i = 0; i < num_loops; i++) {
@@ -116,13 +117,15 @@
 		src_offset += cur_size;
 		dst_offset += cur_size;
 	}
 	radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
 	radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
-	if (fence) {
-		r = radeon_fence_emit(rdev, fence, RADEON_RING_TYPE_GFX_INDEX);
+	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
+	if (r) {
+		radeon_ring_unlock_undo(rdev, ring);
+		return ERR_PTR(r);
 	}
 	radeon_ring_unlock_commit(rdev, ring, false);
-	return r;
+	return fence;
 }
 
 
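For context, the r200_copy_dma() change above moves the function from filling in a caller-supplied fence pointer and returning an int to returning the fence itself (or an ERR_PTR() on failure) and taking a reservation object. Below is a minimal caller sketch of that new contract; it assumes the kernel's IS_ERR()/PTR_ERR() helpers and the driver's existing radeon_fence_wait()/radeon_fence_unref() functions, and example_blit_and_wait() is a hypothetical illustration rather than code from either revision.

	#include <linux/err.h>
	#include "radeon.h"
	#include "radeon_asic.h"

	/* Hypothetical caller, for illustration only. */
	static int example_blit_and_wait(struct radeon_device *rdev,
					 uint64_t src, uint64_t dst,
					 unsigned num_gpu_pages,
					 struct reservation_object *resv)
	{
		struct radeon_fence *fence;
		int r;

		/* New contract: the fence is returned directly, or an ERR_PTR(). */
		fence = r200_copy_dma(rdev, src, dst, num_gpu_pages, resv);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		/* Wait for the DMA copy to retire, then drop the reference. */
		r = radeon_fence_wait(fence, false);
		radeon_fence_unref(&fence);
		return r;
	}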
@@ -141,10 +144,10 @@
 
 int r200_packet0_check(struct radeon_cs_parser *p,
 		       struct radeon_cs_packet *pkt,
 		       unsigned idx, unsigned reg)
 {
-	struct radeon_cs_reloc *reloc;
+	struct radeon_bo_list *reloc;
 	struct r100_cs_track *track;
 	volatile uint32_t *ib;
 	uint32_t tmp;
 	int r;
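The r200_packet0_check() hunk only shows the reloc declaration changing from struct radeon_cs_reloc to struct radeon_bo_list. As rough orientation for how such a reloc is consumed further down in the handler, here is a hedged sketch; radeon_cs_packet_next_reloc(), radeon_cs_dump_packet(), radeon_get_ib_value() and the gpu_offset member are taken from the surrounding radeon driver of this era and should be treated as assumptions, since this page does not show them.

	struct radeon_bo_list *reloc;	/* was: struct radeon_cs_reloc *reloc; */
	int r;

	/* Resolve the buffer object referenced by the register write. */
	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
	if (r) {
		DRM_ERROR("No reloc for ib[%d]=0x%04X\n", idx, reg);
		radeon_cs_dump_packet(p, pkt);
		return r;
	}
	/* Patch the indirect buffer with the object's GPU address. */
	ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->gpu_offset);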