Subversion Repositories Kolibri OS


Rev 5271 → Rev 6104
Line 28... Line 28...
 #include <drm/drmP.h>
 #include <drm/radeon_drm.h>
 #include "radeon.h"
Line 31... Line -...
 
 
-static inline void *
-pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+void* pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
Line 35... Line 34...
                       addr_t *dma_handle)
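
The reworked declaration appears to keep the familiar pci_alloc_consistent() shape: it returns a CPU pointer to a DMA-capable buffer and writes the bus address through dma_handle. A hypothetical caller, sketched only from the declaration above (the device pointer, buffer size and helper name example_dma_buffer are placeholders, not taken from this revision):

/* Hypothetical caller of the helper declared above; pdev, the 64 KiB size
 * and the output parameters are illustrative placeholders. */
static int example_dma_buffer(struct pci_dev *pdev, void **cpu_ptr, addr_t *bus_addr)
{
	*cpu_ptr = pci_alloc_consistent(pdev, 64 * 1024, bus_addr);
	if (*cpu_ptr == NULL)
		return -ENOMEM;   /* allocation failed */
	return 0;                 /* *bus_addr is what the device is programmed with */
}
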
Line 172... Line 171...
 	r = radeon_bo_kmap(rdev->gart.robj, &rdev->gart.ptr);
 	if (r)
 		radeon_bo_unpin(rdev->gart.robj);
 	radeon_bo_unreserve(rdev->gart.robj);
 	rdev->gart.table_addr = gpu_addr;
-    return r;
+
+	if (!r) {
+		int i;
+
+		/* We might have dropped some GART table updates while it wasn't
+		 * mapped, restore all entries
+		 */
+		for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+			radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
+
+	return r;
 }
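
The block added to radeon_gart_table_vram_pin() replays rdev->gart.pages_entry into the hardware table, because entries written while the table object was unpinned only reached the CPU-side copy. A minimal, self-contained sketch of that shadow-table pattern, using generic names rather than the driver's real structures:

/* Generic illustration of a shadow page table that can be replayed after
 * the hardware copy is remapped; all names and types here are hypothetical. */
#include <stdint.h>

struct shadow_gart {
	uint64_t *hw;          /* NULL while the hardware table is unmapped */
	uint64_t *shadow;      /* always-valid CPU-side copy of every entry */
	unsigned  num_entries;
};

static void gart_set_entry(struct shadow_gart *g, unsigned idx, uint64_t entry)
{
	g->shadow[idx] = entry;        /* record the entry unconditionally */
	if (g->hw)
		g->hw[idx] = entry;    /* write hardware only when mapped */
}

static void gart_replay(struct shadow_gart *g)
{
	/* same idea as the loop added above: restore every entry after pinning */
	for (unsigned i = 0; i < g->num_entries; i++)
		g->hw[i] = g->shadow[i];
}
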
Line 179... Line 191...
 
 /**
Line 235... Line 247...
 			int pages)
 {
 	unsigned t;
 	unsigned p;
 	int i, j;
-	u64 page_base;
Line 241... Line 252...
 
 	if (!rdev->gart.ready) {
 		WARN(1, "trying to unbind memory from uninitialized GART !\n");
 		return;
 	}
 	t = offset / RADEON_GPU_PAGE_SIZE;
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 	for (i = 0; i < pages; i++, p++) {
 		if (rdev->gart.pages[p]) {
 			rdev->gart.pages[p] = NULL;
-			rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
-			page_base = rdev->gart.pages_addr[p];
 			for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+				rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
 				if (rdev->gart.ptr) {
-					radeon_gart_set_page(rdev, t, page_base,
-							     RADEON_GART_PAGE_DUMMY);
+					radeon_gart_set_page(rdev, t,
+							     rdev->dummy_page.entry);
 				}
-				page_base += RADEON_GPU_PAGE_SIZE;
 			}
 		}
 	}
-	mb();
-	radeon_gart_tlb_flush(rdev);
+	if (rdev->gart.ptr) {
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
Line 264... Line 275...
 }
 
 /**
  * radeon_gart_bind - bind pages into the gart page table
Line 281... Line 292...
 		     int pages, struct page **pagelist, dma_addr_t *dma_addr,
 		     uint32_t flags)
 {
-    unsigned t;
-    unsigned p;
-    uint64_t page_base;
-    int i, j;
+	unsigned t;
+	unsigned p;
+	uint64_t page_base, page_entry;
+	int i, j;
Line 288... Line 299...
 
-    if (!rdev->gart.ready) {
+	if (!rdev->gart.ready) {
 		WARN(1, "trying to bind memory to uninitialized GART !\n");
-        return -EINVAL;
-    }
+		return -EINVAL;
+	}
 	t = offset / RADEON_GPU_PAGE_SIZE;
Line 294... Line 305...
 	p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
 
-    for (i = 0; i < pages; i++, p++) {
-		rdev->gart.pages_addr[p] = dma_addr[i];
-        rdev->gart.pages[p] = pagelist[i];
-		if (rdev->gart.ptr) {
-		page_base = rdev->gart.pages_addr[p];
-		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
-				radeon_gart_set_page(rdev, t, page_base, flags);
-			page_base += RADEON_GPU_PAGE_SIZE;
-        }
-    }
-	}
-    mb();
-    radeon_gart_tlb_flush(rdev);
+	for (i = 0; i < pages; i++, p++) {
+		rdev->gart.pages[p] = pagelist[i];
+		page_base = dma_addr[i];
+		for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+			page_entry = radeon_gart_get_page_entry(page_base, flags);
+			rdev->gart.pages_entry[t] = page_entry;
+			if (rdev->gart.ptr) {
+				radeon_gart_set_page(rdev, t, page_entry);
+			}
+			page_base += RADEON_GPU_PAGE_SIZE;
+		}
+	}
+	if (rdev->gart.ptr) {
+		mb();
+		radeon_gart_tlb_flush(rdev);
+	}
Line 309... Line 323...
-    return 0;
+	return 0;
 }
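
Inside the rewritten bind loop each CPU page is split into GPU-page-sized entries: page_base starts at dma_addr[i] and advances by RADEON_GPU_PAGE_SIZE for every slot. A standalone sketch of that splitting (the 64 KiB host page is an assumption chosen so the split is visible; with 4 KiB host pages the inner loop runs once):

/* Sketch of splitting one CPU page into GPU-page-sized GART entries,
 * mirroring the j-loop above; the sizes and the address are example values. */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_CPU_PAGE_SIZE 65536u  /* assumed 64 KiB host page */
#define EXAMPLE_GPU_PAGE_SIZE 4096u

int main(void)
{
	uint64_t page_base = 0x12340000ull;   /* example bus address of one CPU page */
	unsigned t = 0;

	for (unsigned j = 0; j < EXAMPLE_CPU_PAGE_SIZE / EXAMPLE_GPU_PAGE_SIZE; j++, t++) {
		printf("GART slot %2u -> 0x%llx\n", t, (unsigned long long)page_base);
		page_base += EXAMPLE_GPU_PAGE_SIZE;
	}
	return 0;
}
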
Line 341... Line 355...
 	rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
-    if (rdev->gart.pages == NULL) {
+	if (rdev->gart.pages == NULL) {
 		radeon_gart_fini(rdev);
-        return -ENOMEM;
-    }
-	rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
-					rdev->gart.num_cpu_pages);
-    if (rdev->gart.pages_addr == NULL) {
+		return -ENOMEM;
+	}
+    rdev->gart.pages_entry = KernelAlloc(sizeof(uint64_t) *
+					 rdev->gart.num_gpu_pages);
+	if (rdev->gart.pages_entry == NULL) {
 		radeon_gart_fini(rdev);
-        return -ENOMEM;
-    }
+		return -ENOMEM;
+	}
 	/* set GART entry to point to the dummy page by default */
-	for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
-		rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
-	}
-    return 0;
+	for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+		rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
+	return 0;
 }
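
The allocation change in radeon_gart_init() also changes the sizing: pages[] stays one pointer per CPU page, while the new pages_entry[] holds one 64-bit entry per GPU page (in the upstream driver both counts are derived from the GTT size). A standalone sketch with example numbers:

/* Example sizing for the two arrays set up in radeon_gart_init();
 * the 256 MiB aperture and the page sizes are illustrative assumptions. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t gtt_size      = 256ull << 20;   /* example GART aperture  */
	unsigned cpu_page_size = 4096;           /* assumed host PAGE_SIZE */
	unsigned gpu_page_size = 4096;           /* assumed GPU page size  */

	uint64_t num_cpu_pages = gtt_size / cpu_page_size;
	uint64_t num_gpu_pages = gtt_size / gpu_page_size;

	printf("pages[]:       %llu pointers\n",
	       (unsigned long long)num_cpu_pages);
	printf("pages_entry[]: %llu entries, %llu bytes\n",
	       (unsigned long long)num_gpu_pages,
	       (unsigned long long)(num_gpu_pages * sizeof(uint64_t)));
	return 0;
}
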
Line 358... Line 371...
 
 /**
Line 363... Line 376...
  *
  * Tear down the gart driver info and free the dummy page (all asics).
  */
 void radeon_gart_fini(struct radeon_device *rdev)
 {
-	if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+	if (rdev->gart.ready) {
 		/* unbind pages */
 		radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
 	}
 	rdev->gart.ready = false;
 	vfree(rdev->gart.pages);
-	vfree(rdev->gart.pages_addr);
+	vfree(rdev->gart.pages_entry);
 	rdev->gart.pages = NULL;
-	rdev->gart.pages_addr = NULL;
+	rdev->gart.pages_entry = NULL;
Line 377... Line 390...
 
 	radeon_dummy_page_fini(rdev);