644,6 → 644,7 |
return r; |
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; |
rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry; |
rdev->asic->gart.set_page = &r100_pci_gart_set_page; |
return radeon_gart_table_ram_alloc(rdev); |
} |
681,11 → 682,16 |
WREG32(RADEON_AIC_HI_ADDR, 0); |
} |
|
/*
 * r100_pci_gart_get_page_entry - encode a DMA address as a PCI GART entry.
 *
 * On the r100 PCI GART the page-table entry is the DMA address itself;
 * no flag bits are folded in, so @flags is accepted for interface
 * symmetry with other ASICs but ignored here.
 */
uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	uint64_t entry = addr;

	return entry;
}
|
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags) |
uint64_t entry) |
{ |
u32 *gtt = rdev->gart.ptr; |
gtt[i] = cpu_to_le32(lower_32_bits(addr)); |
gtt[i] = cpu_to_le32(lower_32_bits(entry)); |
} |
|
void r100_pci_gart_fini(struct radeon_device *rdev) |
722,6 → 728,10 |
tmp |= RADEON_FP2_DETECT_MASK; |
} |
WREG32(RADEON_GEN_INT_CNTL, tmp); |
|
/* read back to post the write */ |
RREG32(RADEON_GEN_INT_CNTL); |
|
return 0; |
} |
|
769,21 → 779,21 |
/* Vertical blank interrupts */ |
if (status & RADEON_CRTC_VBLANK_STAT) { |
if (rdev->irq.crtc_vblank_int[0]) { |
// drm_handle_vblank(rdev->ddev, 0); |
drm_handle_vblank(rdev->ddev, 0); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[0]) |
// radeon_crtc_handle_flip(rdev, 0); |
if (atomic_read(&rdev->irq.pflip[0])) |
radeon_crtc_handle_vblank(rdev, 0); |
} |
if (status & RADEON_CRTC2_VBLANK_STAT) { |
if (rdev->irq.crtc_vblank_int[1]) { |
// drm_handle_vblank(rdev->ddev, 1); |
drm_handle_vblank(rdev->ddev, 1); |
rdev->pm.vblank_sync = true; |
// wake_up(&rdev->irq.vblank_queue); |
wake_up(&rdev->irq.vblank_queue); |
} |
// if (rdev->irq.pflip[1]) |
// radeon_crtc_handle_flip(rdev, 1); |
if (atomic_read(&rdev->irq.pflip[1])) |
radeon_crtc_handle_vblank(rdev, 1); |
} |
if (status & RADEON_FP_DETECT_STAT) { |
queue_hotplug = true; |
3203,6 → 3213,9 |
uint32_t pixel_bytes1 = 0; |
uint32_t pixel_bytes2 = 0; |
|
/* Guess line buffer size to be 8192 pixels */ |
u32 lb_size = 8192; |
|
if (!rdev->mode_info.mode_config_initialized) |
return; |
|
3617,6 → 3630,13 |
DRM_DEBUG_KMS("GRPH2_BUFFER_CNTL from to %x\n", |
(unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL)); |
} |
|
/* Save number of lines the linebuffer leads before the scanout */ |
if (mode1) |
rdev->mode_info.crtcs[0]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode1->crtc_hdisplay); |
|
if (mode2) |
rdev->mode_info.crtcs[1]->lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode2->crtc_hdisplay); |
} |
|
int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring) |
3907,6 → 3927,24 |
return 0; |
} |
|
/*
 * r100_fini - tear down all driver state for an r100-family device.
 *
 * Shuts down the subsystems (presumably in reverse order of their
 * initialization — TODO confirm against r100_init): power management,
 * command processor, writeback, IB pool, GEM, GART (PCI GART only when
 * the device is flagged RADEON_IS_PCI), AGP, IRQs, fences, buffer
 * objects, and the ATOM BIOS parser. Finally releases the cached BIOS
 * image and clears the pointer so later teardown paths cannot
 * double-free it.
 */
void r100_fini(struct radeon_device *rdev)
{
	radeon_pm_fini(rdev);
	r100_cp_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_gem_fini(rdev);
	/* PCI GART table only exists on PCI (non-PCIE/AGP) boards */
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
}
|
/* |
* Due to how kexec works, it can leave the hw fully initialised when it |
* boots the new kernel. However doing our init sequence with the CP and |
4006,6 → 4044,10 |
if (r) { |
/* Somethings want wront with the accel init stop accel */ |
dev_err(rdev->dev, "Disabling GPU acceleration\n"); |
r100_cp_fini(rdev); |
radeon_wb_fini(rdev); |
radeon_ib_pool_fini(rdev); |
radeon_irq_kms_fini(rdev); |
if (rdev->flags & RADEON_IS_PCI) |
r100_pci_gart_fini(rdev); |
rdev->accel_working = false; |
4013,6 → 4055,28 |
return 0; |
} |
|
uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg) |
{ |
unsigned long flags; |
uint32_t ret; |
|
spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
return ret; |
} |
|
void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
{ |
unsigned long flags; |
|
spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
} |
|
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg) |
{ |
if (reg < rdev->rio_mem_size) |