117,22 → 117,10 |
extern int radeon_use_pflipirq; |
extern int radeon_bapm; |
extern int radeon_backlight; |
extern int radeon_auxch; |
extern int radeon_mst; |
|
|
/* Minimal power-management message: carries a single event code. */
typedef struct pm_message { int event; } pm_message_t;
|
/*
 * Display mode description.  Field meanings follow the names; units
 * (pixels for width/height, Hz for freq) are assumed from convention —
 * confirm at the call sites.
 */
typedef struct {
	int width;	/* horizontal resolution */
	int height;	/* vertical resolution */
	int bpp;	/* bits per pixel */
	int freq;	/* refresh rate */
} videomode_t;
|
|
|
static inline u32 ioread32(const volatile void __iomem *addr) |
{ |
return in32((u32)addr); |
274,6 → 262,7 |
* Dummy page |
*/ |
/* Placeholder page mapped into GART slots that have no real backing. */
struct radeon_dummy_page {
	uint64_t entry;		/* pre-computed GART page-table entry for this page — TODO confirm encoding */
	struct page *page;	/* backing kernel page */
	dma_addr_t addr;	/* DMA/bus address of the page */
};
535,6 → 524,7 |
pid_t pid; |
|
struct radeon_mn *mn; |
struct list_head mn_list; |
}; |
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) |
|
675,7 → 665,7 |
unsigned num_cpu_pages; |
unsigned table_size; |
struct page **pages; |
dma_addr_t *pages_addr; |
uint64_t *pages_entry; |
bool ready; |
}; |
|
746,7 → 736,7 |
resource_size_t size; |
u32 __iomem *ptr; |
u32 num_doorbells; /* Number of doorbells actually reserved for radeon. */ |
unsigned long used[DIV_ROUND_UP(RADEON_MAX_DOORBELLS, BITS_PER_LONG)]; |
DECLARE_BITMAP(used, RADEON_MAX_DOORBELLS); |
}; |
|
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); |
956,6 → 946,9 |
/* BOs freed, but not yet updated in the PT */ |
struct list_head freed; |
|
/* BOs cleared in the PT */ |
struct list_head cleared; |
|
/* contains the page directory */ |
struct radeon_bo *page_directory; |
unsigned max_pde_used; |
1582,6 → 1575,7 |
int new_active_crtc_count; |
u32 current_active_crtcs; |
int current_active_crtc_count; |
bool single_display; |
struct radeon_dpm_dynamic_state dyn_state; |
struct radeon_dpm_fan fan; |
u32 tdp_limit; |
1670,6 → 1664,7 |
u8 fan_max_rpm; |
/* dpm */ |
bool dpm_enabled; |
bool sysfs_initialized; |
struct radeon_dpm dpm; |
}; |
|
1687,7 → 1682,6 |
struct radeon_bo *vcpu_bo; |
void *cpu_addr; |
uint64_t gpu_addr; |
void *saved_bo; |
atomic_t handles[RADEON_MAX_UVD_HANDLES]; |
struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; |
unsigned img_size[RADEON_MAX_UVD_HANDLES]; |
1724,8 → 1718,6 |
* VCE |
*/ |
#define RADEON_MAX_VCE_HANDLES 16 |
#define RADEON_VCE_STACK_SIZE (1024*1024) |
#define RADEON_VCE_HEAP_SIZE (4*1024*1024) |
|
struct radeon_vce { |
struct radeon_bo *vcpu_bo; |
1736,6 → 1728,7 |
struct drm_file *filp[RADEON_MAX_VCE_HANDLES]; |
unsigned img_size[RADEON_MAX_VCE_HANDLES]; |
struct delayed_work idle_work; |
uint32_t keyselect; |
}; |
|
int radeon_vce_init(struct radeon_device *rdev); |
1775,6 → 1768,9 |
bool enabled; |
struct r600_audio_pin pin[RADEON_MAX_AFMT_BLOCKS]; |
int num_pins; |
struct radeon_audio_funcs *hdmi_funcs; |
struct radeon_audio_funcs *dp_funcs; |
struct radeon_audio_basic_funcs *funcs; |
}; |
|
/* |
1795,8 → 1791,16 |
/* |
* MMU Notifier |
*/ |
#if defined(CONFIG_MMU_NOTIFIER) |
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr); |
void radeon_mn_unregister(struct radeon_bo *bo); |
#else |
/* Stub used when CONFIG_MMU_NOTIFIER is not set: registration always
 * fails with -ENODEV so callers fall back to non-userptr paths. */
static inline int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	return -ENODEV;
}
static inline void radeon_mn_unregister(struct radeon_bo *bo) {} |
#endif |
|
/* |
* Debugfs |
1862,11 → 1866,14 |
u32 (*get_xclk)(struct radeon_device *rdev); |
/* get the gpu clock counter */ |
uint64_t (*get_gpu_clock_counter)(struct radeon_device *rdev); |
/* get register for info ioctl */ |
int (*get_allowed_info_register)(struct radeon_device *rdev, u32 reg, u32 *val); |
/* gart */ |
struct { |
void (*tlb_flush)(struct radeon_device *rdev); |
uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags); |
void (*set_page)(struct radeon_device *rdev, unsigned i, |
uint64_t addr, uint32_t flags); |
uint64_t entry); |
} gart; |
struct { |
int (*init)(struct radeon_device *rdev); |
1985,6 → 1992,12 |
bool (*vblank_too_short)(struct radeon_device *rdev); |
void (*powergate_uvd)(struct radeon_device *rdev, bool gate); |
void (*enable_bapm)(struct radeon_device *rdev, bool enable); |
void (*fan_ctrl_set_mode)(struct radeon_device *rdev, u32 mode); |
u32 (*fan_ctrl_get_mode)(struct radeon_device *rdev); |
int (*set_fan_speed_percent)(struct radeon_device *rdev, u32 speed); |
int (*get_fan_speed_percent)(struct radeon_device *rdev, u32 *speed); |
u32 (*get_current_sclk)(struct radeon_device *rdev); |
u32 (*get_current_mclk)(struct radeon_device *rdev); |
} dpm; |
/* pageflipping */ |
struct { |
2394,6 → 2407,7 |
atomic64_t vram_usage; |
atomic64_t gtt_usage; |
atomic64_t num_bytes_moved; |
atomic_t gpu_reset_counter; |
/* ACPI interface */ |
struct radeon_atif atif; |
struct radeon_atcs atcs; |
2425,6 → 2439,8 |
|
#define RADEON_MIN_MMIO_SIZE 0x10000 |
|
uint32_t r100_mm_rreg_slow(struct radeon_device *rdev, uint32_t reg); |
void r100_mm_wreg_slow(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg, |
bool always_indirect) |
{ |
2431,33 → 2447,17 |
/* The mmio size is 64kb at minimum. Allows the if to be optimized out. */ |
if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) |
return readl(((void __iomem *)rdev->rmmio) + reg); |
else { |
unsigned long flags; |
uint32_t ret; |
|
spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
ret = readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
|
return ret; |
else |
return r100_mm_rreg_slow(rdev, reg); |
} |
} |
|
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v, |
bool always_indirect) |
{ |
if ((reg < rdev->rmmio_size || reg < RADEON_MIN_MMIO_SIZE) && !always_indirect) |
writel(v, ((void __iomem *)rdev->rmmio) + reg); |
else { |
unsigned long flags; |
|
spin_lock_irqsave(&rdev->mmio_idx_lock, flags); |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags); |
else |
r100_mm_wreg_slow(rdev, reg, v); |
} |
} |
|
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); |
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
2532,6 → 2532,13 |
tmp_ |= ((val) & ~(mask)); \ |
WREG32_PLL(reg, tmp_); \ |
} while (0) |
/*
 * Read-modify-write of an SMC indirect register: bits set in @mask are
 * PRESERVED from the current register value, all other bits are taken
 * from @val (same convention as the WREG32_PLL_P helper above).
 * @reg and @mask are evaluated once, but this is still a macro — avoid
 * arguments with side effects.
 */
#define WREG32_SMC_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_SMC(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_SMC(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, rdev, reg) seq_printf((sqf), #reg " : 0x%08X\n", r100_mm_rreg((rdev), (reg), false)) |
#define RREG32_IO(reg) r100_io_rreg(rdev, (reg)) |
#define WREG32_IO(reg, v) r100_io_wreg(rdev, (reg), (v)) |
2540,185 → 2547,30 |
#define WDOORBELL32(index, v) cik_mm_wdoorbell(rdev, (index), (v)) |
|
/* |
 * Indirect registers accessors.
* They used to be inlined, but this increases code size by ~65 kbytes. |
* Since each performs a pair of MMIO ops |
* within a spin_lock_irqsave/spin_unlock_irqrestore region, |
* the cost of call+ret is almost negligible. MMIO and locking |
* costs several dozens of cycles each at best, call+ret is ~5 cycles. |
*/ |
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
{ |
unsigned long flags; |
uint32_t r; |
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg); |
void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg); |
void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg); |
void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg); |
void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg); |
void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg); |
void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg); |
void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
|
spin_lock_irqsave(&rdev->pcie_idx_lock, flags); |
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
r = RREG32(RADEON_PCIE_DATA); |
spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags); |
return r; |
} |
|
/* Write PCIE indirect register @reg (index masked by pcie_reg_mask) via
 * the RADEON_PCIE_INDEX/DATA pair; pcie_idx_lock with IRQs off keeps the
 * two MMIO writes atomic with respect to other indirect accesses. */
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pcie_idx_lock, flags);
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));	/* select register */
	WREG32(RADEON_PCIE_DATA, (v));					/* write its value */
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
}
|
/* Read SMC indirect register @reg through the TN_SMC_IND index/data
 * pair; smc_idx_lock with IRQs off keeps the pair from interleaving. */
static inline u32 tn_smc_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, (reg));	/* select register */
	r = RREG32(TN_SMC_IND_DATA_0);		/* fetch its value */
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
	return r;
}
|
/* Write SMC indirect register @reg through the TN_SMC_IND index/data
 * pair, serialized by smc_idx_lock with IRQs off. */
static inline void tn_smc_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	WREG32(TN_SMC_IND_INDEX_0, (reg));	/* select register */
	WREG32(TN_SMC_IND_DATA_0, (v));		/* write its value */
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}
|
/* Read RCU indirect register @reg (index truncated to 13 bits) via the
 * R600_RCU index/data pair under rcu_idx_lock with IRQs off. */
static inline u32 r600_rcu_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));	/* select register */
	r = RREG32(R600_RCU_DATA);			/* fetch its value */
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
	return r;
}
|
/* Write RCU indirect register @reg (index truncated to 13 bits) via the
 * R600_RCU index/data pair under rcu_idx_lock with IRQs off. */
static inline void r600_rcu_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->rcu_idx_lock, flags);
	WREG32(R600_RCU_INDEX, ((reg) & 0x1fff));	/* select register */
	WREG32(R600_RCU_DATA, (v));			/* write its value */
	spin_unlock_irqrestore(&rdev->rcu_idx_lock, flags);
}
|
/* Read clock-gating indirect register @reg (16-bit index) via the
 * EVERGREEN_CG_IND addr/data pair under cg_idx_lock with IRQs off. */
static inline u32 eg_cg_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));	/* select register */
	r = RREG32(EVERGREEN_CG_IND_DATA);			/* fetch its value */
	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
	return r;
}
|
/* Write clock-gating indirect register @reg (16-bit index) via the
 * EVERGREEN_CG_IND addr/data pair under cg_idx_lock with IRQs off. */
static inline void eg_cg_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->cg_idx_lock, flags);
	WREG32(EVERGREEN_CG_IND_ADDR, ((reg) & 0xffff));	/* select register */
	WREG32(EVERGREEN_CG_IND_DATA, (v));			/* write its value */
	spin_unlock_irqrestore(&rdev->cg_idx_lock, flags);
}
|
/* Read PIF PHY0 indirect register @reg (16-bit index); pif_idx_lock is
 * shared with the PHY1 accessors below, IRQs disabled while held. */
static inline u32 eg_pif_phy0_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));	/* select register */
	r = RREG32(EVERGREEN_PIF_PHY0_DATA);			/* fetch its value */
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
	return r;
}
|
/* Write PIF PHY0 indirect register @reg (16-bit index) under the shared
 * pif_idx_lock with IRQs off. */
static inline void eg_pif_phy0_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY0_INDEX, ((reg) & 0xffff));	/* select register */
	WREG32(EVERGREEN_PIF_PHY0_DATA, (v));			/* write its value */
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
|
/* Read PIF PHY1 indirect register @reg (16-bit index) under the shared
 * pif_idx_lock with IRQs off. */
static inline u32 eg_pif_phy1_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));	/* select register */
	r = RREG32(EVERGREEN_PIF_PHY1_DATA);			/* fetch its value */
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
	return r;
}
|
/* Write PIF PHY1 indirect register @reg (16-bit index) under the shared
 * pif_idx_lock with IRQs off. */
static inline void eg_pif_phy1_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->pif_idx_lock, flags);
	WREG32(EVERGREEN_PIF_PHY1_INDEX, ((reg) & 0xffff));	/* select register */
	WREG32(EVERGREEN_PIF_PHY1_DATA, (v));			/* write its value */
	spin_unlock_irqrestore(&rdev->pif_idx_lock, flags);
}
|
/* Read UVD context indirect register @reg (index truncated to 9 bits)
 * via the R600_UVD_CTX index/data pair under uvd_idx_lock, IRQs off. */
static inline u32 r600_uvd_ctx_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));	/* select register */
	r = RREG32(R600_UVD_CTX_DATA);			/* fetch its value */
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
	return r;
}
|
/* Write UVD context indirect register @reg (index truncated to 9 bits)
 * via the R600_UVD_CTX index/data pair under uvd_idx_lock, IRQs off. */
static inline void r600_uvd_ctx_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->uvd_idx_lock, flags);
	WREG32(R600_UVD_CTX_INDEX, ((reg) & 0x1ff));	/* select register */
	WREG32(R600_UVD_CTX_DATA, (v));			/* write its value */
	spin_unlock_irqrestore(&rdev->uvd_idx_lock, flags);
}
|
|
/* Read DIDT indirect register @reg (no index masking here) via the
 * CIK_DIDT_IND index/data pair under didt_idx_lock with IRQs off. */
static inline u32 cik_didt_rreg(struct radeon_device *rdev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
	WREG32(CIK_DIDT_IND_INDEX, (reg));	/* select register */
	r = RREG32(CIK_DIDT_IND_DATA);		/* fetch its value */
	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
	return r;
}
|
/* Write DIDT indirect register @reg (no index masking here) via the
 * CIK_DIDT_IND index/data pair under didt_idx_lock with IRQs off. */
static inline void cik_didt_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	unsigned long flags;

	spin_lock_irqsave(&rdev->didt_idx_lock, flags);
	WREG32(CIK_DIDT_IND_INDEX, (reg));	/* select register */
	WREG32(CIK_DIDT_IND_DATA, (v));		/* write its value */
	spin_unlock_irqrestore(&rdev->didt_idx_lock, flags);
}
|
void r100_pll_errata_after_index(struct radeon_device *rdev); |
|
|
2829,7 → 2681,8 |
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) |
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) |
#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f)) |
#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f)) |
#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e)) |
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) |
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) |
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count))) |
2890,6 → 2743,7 |
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) |
#define radeon_get_xclk(rdev) (rdev)->asic->get_xclk((rdev)) |
#define radeon_get_gpu_clock_counter(rdev) (rdev)->asic->get_gpu_clock_counter((rdev)) |
#define radeon_get_allowed_info_register(rdev, r, v) (rdev)->asic->get_allowed_info_register((rdev), (r), (v)) |
#define radeon_dpm_init(rdev) rdev->asic->dpm.init((rdev)) |
#define radeon_dpm_setup_asic(rdev) rdev->asic->dpm.setup_asic((rdev)) |
#define radeon_dpm_enable(rdev) rdev->asic->dpm.enable((rdev)) |
2908,6 → 2762,8 |
#define radeon_dpm_vblank_too_short(rdev) rdev->asic->dpm.vblank_too_short((rdev)) |
#define radeon_dpm_powergate_uvd(rdev, g) rdev->asic->dpm.powergate_uvd((rdev), (g)) |
#define radeon_dpm_enable_bapm(rdev, e) rdev->asic->dpm.enable_bapm((rdev), (e)) |
#define radeon_dpm_get_current_sclk(rdev) rdev->asic->dpm.get_current_sclk((rdev)) |
#define radeon_dpm_get_current_mclk(rdev) rdev->asic->dpm.get_current_mclk((rdev)) |
|
/* Common functions */ |
/* AGP */ |
3074,6 → 2930,7 |
#include "radeon_object.h" |
|
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 |
#define PCI_VENDOR_ID_ATI 0x1002 |
|
resource_size_t |
drm_get_resource_start(struct drm_device *dev, unsigned int resource); |