Rev 1119 | Rev 1125 | Go to most recent revision | Show entire file | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 1119 | Rev 1120 | ||
---|---|---|---|
Line 153... | Line 153... | ||
153 | } |
153 | } |
Line 154... | Line 154... | ||
154 | 154 | ||
155 | void r520_gpu_init(struct radeon_device *rdev) |
155 | void r520_gpu_init(struct radeon_device *rdev) |
156 | { |
156 | { |
157 | unsigned pipe_select_current, gb_pipe_select, tmp; |
157 | unsigned pipe_select_current, gb_pipe_select, tmp; |
Line 158... | Line 158... | ||
158 | dbgprintf("%s\n\r",__FUNCTION__); |
158 | dbgprintf("%s\n",__FUNCTION__); |
159 | 159 | ||
160 | r100_hdp_reset(rdev); |
160 | r100_hdp_reset(rdev); |
161 | rs600_disable_vga(rdev); |
161 | rs600_disable_vga(rdev); |
Line 202... | Line 202... | ||
202 | * VRAM info |
202 | * VRAM info |
203 | */ |
203 | */ |
204 | static void r520_vram_get_type(struct radeon_device *rdev) |
204 | static void r520_vram_get_type(struct radeon_device *rdev) |
205 | { |
205 | { |
206 | uint32_t tmp; |
206 | uint32_t tmp; |
207 | dbgprintf("%s\n\r",__FUNCTION__); |
207 | dbgprintf("%s\n",__FUNCTION__); |
Line 208... | Line 208... | ||
208 | 208 | ||
209 | rdev->mc.vram_width = 128; |
209 | rdev->mc.vram_width = 128; |
210 | rdev->mc.vram_is_ddr = true; |
210 | rdev->mc.vram_is_ddr = true; |
211 | tmp = RREG32_MC(R520_MC_CNTL0); |
211 | tmp = RREG32_MC(R520_MC_CNTL0); |
Line 243... | Line 243... | ||
243 | * Global GPU functions |
243 | * Global GPU functions |
244 | */ |
244 | */ |
245 | void rs600_disable_vga(struct radeon_device *rdev) |
245 | void rs600_disable_vga(struct radeon_device *rdev) |
246 | { |
246 | { |
247 | unsigned tmp; |
247 | unsigned tmp; |
248 | dbgprintf("%s\n\r",__FUNCTION__); |
248 | dbgprintf("%s\n",__FUNCTION__); |
Line 249... | Line 249... | ||
249 | 249 | ||
250 | WREG32(0x330, 0); |
250 | WREG32(0x330, 0); |
251 | WREG32(0x338, 0); |
251 | WREG32(0x338, 0); |
252 | tmp = RREG32(0x300); |
252 | tmp = RREG32(0x300); |
Line 262... | Line 262... | ||
262 | { |
262 | { |
263 | unsigned tmp; |
263 | unsigned tmp; |
264 | unsigned gb_pipe_select; |
264 | unsigned gb_pipe_select; |
265 | unsigned num_pipes; |
265 | unsigned num_pipes; |
Line 266... | Line 266... | ||
266 | 266 | ||
Line 267... | Line 267... | ||
267 | dbgprintf("%s\n\r",__FUNCTION__); |
267 | dbgprintf("%s\n",__FUNCTION__); |
268 | 268 | ||
269 | /* GA_ENHANCE workaround TCL deadlock issue */ |
269 | /* GA_ENHANCE workaround TCL deadlock issue */ |
270 | WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)); |
270 | WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3)); |
Line 312... | Line 312... | ||
312 | "programming pipes. Bad things might happen.\n"); |
312 | "programming pipes. Bad things might happen.\n"); |
313 | } |
313 | } |
314 | DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); |
314 | DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); |
315 | } |
315 | } |
Line 316... | Line -... | ||
316 | - | ||
317 | void rv370_pcie_gart_disable(struct radeon_device *rdev) |
- | |
318 | { |
- | |
319 | uint32_t tmp; |
- | |
320 | dbgprintf("%s\n\r",__FUNCTION__); |
- | |
321 | - | ||
322 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
- | |
323 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
- | |
324 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); |
- | |
325 | if (rdev->gart.table.vram.robj) { |
- | |
326 | // radeon_object_kunmap(rdev->gart.table.vram.robj); |
- | |
327 | // radeon_object_unpin(rdev->gart.table.vram.robj); |
- | |
328 | } |
- | |
329 | } |
- | |
330 | - | ||
331 | void radeon_gart_table_vram_free(struct radeon_device *rdev) |
- | |
332 | { |
- | |
333 | if (rdev->gart.table.vram.robj == NULL) { |
- | |
334 | return; |
- | |
335 | } |
- | |
336 | // radeon_object_kunmap(rdev->gart.table.vram.robj); |
- | |
337 | // radeon_object_unpin(rdev->gart.table.vram.robj); |
- | |
338 | // radeon_object_unref(&rdev->gart.table.vram.robj); |
- | |
339 | } |
- | |
340 | - | ||
341 | /* |
- | |
342 | * Common gart functions. |
- | |
343 | */ |
- | |
344 | void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, |
- | |
345 | int pages) |
- | |
346 | { |
- | |
347 | unsigned t; |
- | |
348 | unsigned p; |
- | |
349 | int i, j; |
- | |
350 | dbgprintf("%s\n\r",__FUNCTION__); |
- | |
351 | - | ||
352 | if (!rdev->gart.ready) { |
- | |
353 | dbgprintf("trying to unbind memory to unitialized GART !\n"); |
- | |
354 | return; |
- | |
355 | } |
- | |
356 | t = offset / 4096; |
- | |
357 | p = t / (PAGE_SIZE / 4096); |
- | |
358 | for (i = 0; i < pages; i++, p++) { |
- | |
359 | if (rdev->gart.pages[p]) { |
- | |
360 | // pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p], |
- | |
361 | // PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
- | |
362 | rdev->gart.pages[p] = NULL; |
- | |
363 | rdev->gart.pages_addr[p] = 0; |
- | |
364 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { |
- | |
365 | radeon_gart_set_page(rdev, t, 0); |
- | |
366 | } |
- | |
367 | } |
- | |
368 | } |
- | |
369 | mb(); |
- | |
370 | radeon_gart_tlb_flush(rdev); |
- | |
371 | } |
- | |
372 | - | ||
373 | - | ||
374 | - | ||
375 | void radeon_gart_fini(struct radeon_device *rdev) |
- | |
376 | { |
- | |
377 | if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) { |
- | |
378 | /* unbind pages */ |
- | |
379 | radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages); |
- | |
380 | } |
- | |
381 | rdev->gart.ready = false; |
- | |
382 | // kfree(rdev->gart.pages); |
- | |
383 | // kfree(rdev->gart.pages_addr); |
- | |
384 | rdev->gart.pages = NULL; |
- | |
385 | rdev->gart.pages_addr = NULL; |
- | |
386 | } |
- | |
387 | - | ||
Line 388... | Line 316... | ||
388 | 316 | ||
389 | 317 | ||
Line 390... | Line 318... | ||
390 | int radeon_agp_init(struct radeon_device *rdev) |
318 | int radeon_agp_init(struct radeon_device *rdev) |
Line 391... | Line 319... | ||
391 | { |
319 | { |
392 | 320 | ||
393 | dbgprintf("%s\n\r",__FUNCTION__); |
321 | dbgprintf("%s\n",__FUNCTION__); |
394 | 322 | ||
Line 533... | Line 461... | ||
533 | 461 | ||
Line 534... | Line 462... | ||
534 | dbgprintf("done\n"); |
462 | dbgprintf("done\n"); |
Line 535... | Line -... | ||
535 | - | ||
536 | } |
- | |
537 | - | ||
538 | int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
- | |
539 | { |
- | |
540 | void __iomem *ptr = (void *)rdev->gart.table.vram.ptr; |
- | |
541 | - | ||
542 | if (i < 0 || i > rdev->gart.num_gpu_pages) { |
- | |
543 | return -EINVAL; |
- | |
544 | } |
- | |
545 | addr = (((u32_t)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 4) | 0xC; |
- | |
546 | writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4)); |
- | |
547 | return 0; |
- | |
548 | } |
- | |
549 | - | ||
550 | - | ||
/*
 * Allocate the CPU-side GART bookkeeping tables sized from the
 * configured GTT aperture. Idempotent: returns 0 immediately if the
 * tables already exist. Returns -EINVAL when PAGE_SIZE < 4096,
 * -ENOMEM on allocation failure.
 */
int radeon_gart_init(struct radeon_device *rdev)
{

	dbgprintf("%s\n",__FUNCTION__);

	/* Already initialized - nothing to do. */
	if (rdev->gart.pages) {
		return 0;
	}
	/* We need PAGE_SIZE >= 4096: each CPU page maps to one or more
	 * 4KiB GPU pages, never a fraction of one. */
	if (PAGE_SIZE < 4096) {
		DRM_ERROR("Page size is smaller than GPU page size!\n");
		return -EINVAL;
	}
	/* Compute table sizes: one slot per CPU page, one entry per
	 * 4KiB GPU page covering the whole GTT aperture. */
	rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
	rdev->gart.num_gpu_pages = rdev->mc.gtt_size / 4096;
	DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
		 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
	/* Allocate pages table (zero-initialized: all slots unbound). */
	rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
				   GFP_KERNEL);
	if (rdev->gart.pages == NULL) {
//		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	/* Bus address per CPU page (u32_t: this port is 32-bit). */
	rdev->gart.pages_addr = kzalloc(sizeof(u32_t) *
					rdev->gart.num_cpu_pages, GFP_KERNEL);
	if (rdev->gart.pages_addr == NULL) {
		/* NOTE(review): rdev->gart.pages is leaked on this path
		 * because the radeon_gart_fini() cleanup (and kfree inside
		 * it) is commented out - confirm whether that is intentional
		 * in this port. */
//		radeon_gart_fini(rdev);
		return -ENOMEM;
	}
	return 0;
}
- | |
584 | - | ||
585 | int radeon_gart_table_vram_alloc(struct radeon_device *rdev) |
- | |
586 | { |
- | |
587 | uint32_t gpu_addr; |
- | |
588 | int r; |
- | |
589 | - | ||
590 | // if (rdev->gart.table.vram.robj == NULL) { |
- | |
591 | // r = radeon_object_create(rdev, NULL, |
- | |
592 | // rdev->gart.table_size, |
- | |
593 | // true, |
- | |
594 | // RADEON_GEM_DOMAIN_VRAM, |
- | |
595 | // false, &rdev->gart.table.vram.robj); |
- | |
596 | // if (r) { |
- | |
597 | // return r; |
- | |
598 | // } |
- | |
599 | // } |
- | |
600 | // r = radeon_object_pin(rdev->gart.table.vram.robj, |
- | |
601 | // RADEON_GEM_DOMAIN_VRAM, &gpu_addr); |
- | |
602 | // if (r) { |
- | |
603 | // radeon_object_unref(&rdev->gart.table.vram.robj); |
- | |
604 | // return r; |
- | |
605 | // } |
- | |
606 | // r = radeon_object_kmap(rdev->gart.table.vram.robj, |
- | |
607 | // (void **)&rdev->gart.table.vram.ptr); |
- | |
608 | // if (r) { |
- | |
609 | // radeon_object_unpin(rdev->gart.table.vram.robj); |
- | |
610 | // radeon_object_unref(&rdev->gart.table.vram.robj); |
- | |
611 | // DRM_ERROR("radeon: failed to map gart vram table.\n"); |
- | |
612 | // return r; |
- | |
613 | // } |
- | |
614 | - | ||
Line 615... | Line -... | ||
615 | gpu_addr = 0x800000; |
- | |
Line 616... | Line -... | ||
616 | - | ||
617 | u32_t pci_addr = rdev->mc.aper_base + gpu_addr; |
- | |
618 | - | ||
619 | rdev->gart.table.vram.ptr = (void*)MapIoMem(pci_addr, rdev->gart.table_size, PG_SW); |
- | |
620 | - | ||
621 | rdev->gart.table_addr = gpu_addr; |
- | |
622 | - | ||
Line 623... | Line 463... | ||
623 | dbgprintf("alloc gart vram:\n gpu_base %x pci_base %x lin_addr %x", |
463 | |
Line 624... | Line -... | ||
624 | gpu_addr, pci_addr, rdev->gart.table.vram.ptr); |
- | |
625 | - | ||
626 | return 0; |
- | |
627 | } |
- | |
628 | - | ||
/* Forward declaration: the flush helper is defined later in this file. */
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);

/*
 * Bring up the PCIE GART: allocate the common gart structures and the
 * VRAM page table, program the aperture window and table base into the
 * PCIE TX GART registers, then enable translation and flush the TLB.
 * Returns 0 on success or a negative error from the allocation steps.
 */
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	dbgprintf("%s\n",__FUNCTION__);

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
//	r = rv370_debugfs_pcie_gart_info_init(rdev);
//	if (r) {
//		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
//	}
	/* One 32-bit entry per 4KiB GPU page. */
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
	/* discard memory request outside of configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	/* Aperture window: [gtt_location, gtt_location + gtt_size - 4096];
	 * the HI halves are zeroed (32-bit addressing in this port). */
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	/* Clear error */
	WREG32_PCIE(0x18, 0);
	/* Enable translation, keeping unmapped-access discard set. */
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 rdev->mc.gtt_size >> 20, table_addr);
	rdev->gart.ready = true;
	return 0;
}
- | |
678 | - | ||
/*
 * Invalidate the PCIE GART TLB so freshly written page table entries
 * take effect, by pulsing the INVALIDATE_TLB bit in TX_GART_CNTL.
 */
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Workaround HW bug do flush 2 times */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		/* Set the invalidate bit... */
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		/* ...read back (presumably to flush the posted write before
		 * clearing the bit - matches the upstream driver's pattern)... */
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		/* ...then restore the original control value. */
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
		mb();
	}
}
- | |
693 | - | ||
694 | int r300_gart_enable(struct radeon_device *rdev) |
- | |
695 | { |
- | |
696 | #if __OS_HAS_AGP |
- | |
697 | if (rdev->flags & RADEON_IS_AGP) { |
- | |
698 | if (rdev->family > CHIP_RV350) { |
- | |
699 | rv370_pcie_gart_disable(rdev); |
- | |
700 | } else { |
- | |
701 | r100_pci_gart_disable(rdev); |
- | |
702 | } |
- | |
703 | return 0; |
- | |
704 | } |
- | |
705 | #endif |
- | |
706 | if (rdev->flags & RADEON_IS_PCIE) { |
- | |
Line 707... | Line 464... | ||
707 | rdev->asic->gart_disable = &rv370_pcie_gart_disable; |
464 | } |
708 | rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; |
465 | |
Line 739... | Line 496... | ||
739 | // } |
496 | // } |
740 | return 0; |
497 | return 0; |
741 | } |
498 | }>><>>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>><>> |
Line 742... | Line -... | ||
742 | - | ||
743 | - | ||
744 | int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
- | |
745 | int pages, u32_t *pagelist) |
- | |
746 | { |
- | |
747 | unsigned t; |
- | |
748 | unsigned p; |
- | |
749 | uint64_t page_base; |
- | |
750 | int i, j; |
- | |
751 | - | ||
752 | dbgprintf("%s\n\r",__FUNCTION__); |
- | |
753 | - | ||
754 | - | ||
755 | if (!rdev->gart.ready) { |
- | |
756 | DRM_ERROR("trying to bind memory to unitialized GART !\n"); |
- | |
757 | return -EINVAL; |
- | |
758 | } |
- | |
759 | t = offset / 4096; |
- | |
760 | p = t / (PAGE_SIZE / 4096); |
- | |
761 | - | ||
762 | for (i = 0; i < pages; i++, p++) { |
- | |
763 | /* we need to support large memory configurations */ |
- | |
764 | /* assume that unbind have already been call on the range */ |
- | |
Line 765... | Line -... | ||
765 | - | ||
766 | rdev->gart.pages_addr[p] = pagelist[i] & ~4095; |
- | |
767 | - | ||
768 | //if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) { |
- | |
769 | // /* FIXME: failed to map page (return -ENOMEM?) */ |
- | |
770 | // radeon_gart_unbind(rdev, offset, pages); |
- | |
771 | // return -ENOMEM; |
- | |
772 | //} |
- | |
773 | rdev->gart.pages[p] = pagelist[i]; |
- | |
774 | page_base = (uint32_t)rdev->gart.pages_addr[p]; |
- | |
775 | for (j = 0; j < (PAGE_SIZE / 4096); j++, t++) { |
- | |
776 | radeon_gart_set_page(rdev, t, page_base); |
- | |
777 | page_base += 4096; |
- | |
778 | } |
- | |
779 | } |
- | |
780 | mb(); |
- | |
781 | radeon_gart_tlb_flush(rdev); |
- | |
782 | - | ||
783 | dbgprintf("done %s\n",__FUNCTION__); |
- |