50,6 → 50,31 |
*/ |
|
/* |
* Indirect registers accessor |
*/ |
/*
 * Read an indirect PCIE register.
 *
 * The offset is latched into the PCIE index register, then the value
 * is fetched from the PCIE data register.  The index/data pair is a
 * shared hardware resource, so the two accesses are serialized under
 * an IRQ-safe spinlock.
 */
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	unsigned long irq_flags;
	uint32_t val;

	spin_lock_irqsave(&rdev->pcie_idx_lock, irq_flags);
	WREG32(RADEON_PCIE_INDEX, reg & rdev->pcie_reg_mask);
	val = RREG32(RADEON_PCIE_DATA);
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, irq_flags);
	return val;
}
|
/*
 * Write an indirect PCIE register.
 *
 * Mirror of rv370_pcie_rreg(): latch the offset into the PCIE index
 * register, then store the value through the PCIE data register,
 * holding the same IRQ-safe spinlock so the index/data sequence
 * cannot be interleaved with another accessor.
 */
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&rdev->pcie_idx_lock, irq_flags);
	WREG32(RADEON_PCIE_INDEX, reg & rdev->pcie_reg_mask);
	WREG32(RADEON_PCIE_DATA, v);
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, irq_flags);
}
|
/* |
* rv370,rv380 PCIE GART |
*/ |
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); |
73,11 → 98,8 |
#define R300_PTE_WRITEABLE (1 << 2) |
#define R300_PTE_READABLE (1 << 3) |
|
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags) |
uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) |
{ |
void __iomem *ptr = rdev->gart.ptr; |
|
addr = (lower_32_bits(addr) >> 8) | |
((upper_32_bits(addr) & 0xff) << 24); |
if (flags & RADEON_GART_PAGE_READ) |
86,10 → 108,18 |
addr |= R300_PTE_WRITEABLE; |
if (!(flags & RADEON_GART_PAGE_SNOOP)) |
addr |= R300_PTE_UNSNOOPED; |
return addr; |
} |
|
void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t entry) |
{ |
void __iomem *ptr = rdev->gart.ptr; |
|
/* on x86 we want this to be CPU endian, on powerpc |
* on powerpc without HW swappers, it'll get swapped on way |
* into VRAM - so no need for cpu_to_le32 on VRAM tables */ |
writel(addr, ((void __iomem *)ptr) + (i * 4)); |
writel(entry, ((void __iomem *)ptr) + (i * 4)); |
} |
|
int rv370_pcie_gart_init(struct radeon_device *rdev) |
109,6 → 139,7 |
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n"); |
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; |
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
return radeon_gart_table_vram_alloc(rdev); |
} |
1411,6 → 1442,25 |
|
|
|
/*
 * r300_fini - full device teardown for r300-family ASICs.
 *
 * NOTE(review): the call sequence appears to be ordering-sensitive
 * (presumably the reverse of the init path) — do not reorder without
 * checking the corresponding init/startup code.
 */
void r300_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	/* Tear down whichever GART backend matches the bus type;
	 * the flags select between PCIE and PCI GART. */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	/* Release the BIOS copy (allocated elsewhere, presumably when the
	 * ROM was fetched) and clear the pointer so nothing dereferences
	 * it after teardown. */
	kfree(rdev->bios);
	rdev->bios = NULL;
}
|
int r300_init(struct radeon_device *rdev) |
{ |
1489,6 → 1539,10 |
if (r) { |
/* Something went wrong with the accel init, so stop accel */ |
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
if (rdev->flags & RADEON_IS_PCIE) |
rv370_pcie_gart_fini(rdev); |
if (rdev->flags & RADEON_IS_PCI) |