Subversion Repositories Kolibri OS


--- Rev 1428
+++ Rev 1430
@@ -137,9 +137,10 @@
 			int pages)
 {
 	unsigned t;
 	unsigned p;
 	int i, j;
+	u64 page_base;
 
 	if (!rdev->gart.ready) {
 		WARN(1, "trying to unbind memory to unitialized GART !\n");
 		return;
@@ -149,13 +150,15 @@
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
 //           pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
 //                      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 			rdev->gart.pages[p] = NULL;
-			rdev->gart.pages_addr[p] = 0;
+			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, 0);
+				radeon_gart_set_page(rdev, t, page_base);
+				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
 	}
 	mb();
 	radeon_gart_tlb_flush(rdev);
@@ -185,13 +188,9 @@
         /* we need to support large memory configurations */
         /* assume that unbind have already been call on the range */
 
         rdev->gart.pages_addr[p] = pagelist[i] & ~4095;
 
-        //if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
-        //    /* FIXME: failed to map page (return -ENOMEM?) */
-        //    radeon_gart_unbind(rdev, offset, pages);
-        //    return -ENOMEM;
-        //}
+
         rdev->gart.pages[p] = pagelist[i];
 		page_base = rdev->gart.pages_addr[p];
 		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
@@ -202,19 +201,40 @@
     mb();
     radeon_gart_tlb_flush(rdev);
     return 0;
 }
 
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+	int i, j, t;
+	u64 page_base;
+
+	for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+		page_base = rdev->gart.pages_addr[i];
+		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+			radeon_gart_set_page(rdev, t, page_base);
+			page_base += RADEON_GPU_PAGE_SIZE;
+		}
+	}
+	mb();
+	radeon_gart_tlb_flush(rdev);
+}
+
 int radeon_gart_init(struct radeon_device *rdev)
 {
+	int r, i;
+
     if (rdev->gart.pages) {
         return 0;
     }
 	/* We need PAGE_SIZE >= RADEON_GPU_PAGE_SIZE */
 	if (PAGE_SIZE < RADEON_GPU_PAGE_SIZE) {
         DRM_ERROR("Page size is smaller than GPU page size!\n");
         return -EINVAL;
     }
+	r = radeon_dummy_page_init(rdev);
+	if (r)
+		return r;
     /* Compute table size */
     rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
 	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
     DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
@@ -230,9 +250,13 @@
                     rdev->gart.num_cpu_pages, GFP_KERNEL);
     if (rdev->gart.pages_addr == NULL) {
 		radeon_gart_fini(rdev);
         return -ENOMEM;
     }
+	/* set GART entry to point to the dummy page by default */
+	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+	}
     return 0;
 }
 
 void radeon_gart_fini(struct radeon_device *rdev)
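
The pattern shared by the unbind, bind and restore hunks above is that one CPU page covers PAGE_SIZE / RADEON_GPU_PAGE_SIZE GART entries, so each table update loops over that ratio and advances page_base by one GPU page per entry; after Rev 1430, unbound entries point at the dummy page instead of address 0. Below is a minimal standalone sketch of just that loop, written as ordinary user-space C; PAGE_SIZE, RADEON_GPU_PAGE_SIZE, set_gart_entry() and the addresses are illustrative stand-ins, not the driver's real definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values only; the real driver takes PAGE_SIZE from the
     * kernel and RADEON_GPU_PAGE_SIZE from the radeon headers. */
    #define PAGE_SIZE            4096u
    #define RADEON_GPU_PAGE_SIZE 4096u

    /* Stand-in for the hardware-specific table write done by radeon_gart_set_page(). */
    static void set_gart_entry(unsigned t, uint64_t addr)
    {
        printf("entry %u -> 0x%llx\n", t, (unsigned long long)addr);
    }

    int main(void)
    {
        uint64_t dummy_page_addr = 0x100000; /* pretend bus address of the dummy page */
        unsigned t = 0;                      /* first GPU-page slot covered by this CPU page */
        uint64_t page_base = dummy_page_addr;

        /* One CPU page spans PAGE_SIZE / RADEON_GPU_PAGE_SIZE GART entries; on
         * unbind, the Rev 1430 code points each of them at the dummy page. */
        for (unsigned j = 0; j < PAGE_SIZE / RADEON_GPU_PAGE_SIZE; j++, t++) {
            set_gart_entry(t, page_base);
            page_base += RADEON_GPU_PAGE_SIZE;
        }
        return 0;
    }

With the values chosen here the ratio is 1 and the loop writes a single entry; a larger CPU page (for example 64 KiB) would simply produce more consecutive entries per bound or unbound page.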