61,9 → 61,10 |
*/ |
|
#include <asm/atomic.h> |
|
#include <linux/wait.h> |
#include <linux/list.h> |
#include <linux/kref.h> |
#include <asm/div64.h> |
|
#include <ttm/ttm_bo_api.h> |
#include <ttm/ttm_bo_driver.h> |
74,7 → 75,6 |
#include <pci.h> |
|
#include <errno-base.h> |
#include "drm_edid.h" |
|
#include "radeon_family.h" |
#include "radeon_mode.h" |
82,8 → 82,6 |
|
#include <syscall.h> |
|
extern unsigned long volatile jiffies; |
|
/* |
* Modules parameters. |
*/ |
102,6 → 100,11 |
extern int radeon_disp_priority; |
extern int radeon_hw_i2c; |
extern int radeon_pcie_gen2; |
extern int radeon_msi; |
extern int radeon_lockup_timeout; |
|
|
|
/* Minimal stand-in for the kernel's pm_message_t (suspend/resume event code). */
typedef struct pm_message {
	int event;	/* power-management event identifier */
} pm_message_t;
114,53 → 117,8 |
int freq; |
}videomode_t; |
|
/*
 * Raw MMIO read accessors: each performs a single volatile load of the
 * given width from a memory-mapped I/O address.  The __force cast only
 * strips the sparse address-space annotation for the dereference; no
 * ordering barriers are implied.
 */
static inline uint8_t __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile uint8_t __force *) addr;
}

/* 16-bit variant of __raw_readb(). */
static inline uint16_t __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile uint16_t __force *) addr;
}

/* 32-bit variant of __raw_readb(). */
static inline uint32_t __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile uint32_t __force *) addr;
}

/* In this port readb/readw/readl alias the raw (barrier-free) accessors. */
#define readb __raw_readb
#define readw __raw_readw
#define readl __raw_readl
|
|
|
/*
 * Raw MMIO write accessors: each performs a single volatile store of the
 * given width to a memory-mapped I/O address.  As with the readers, the
 * __force cast drops the sparse annotation and no barriers are implied.
 */
static inline void __raw_writeb(uint8_t b, volatile void __iomem *addr)
{
	*(volatile uint8_t __force *) addr = b;
}

/* 16-bit variant of __raw_writeb(). */
static inline void __raw_writew(uint16_t b, volatile void __iomem *addr)
{
	*(volatile uint16_t __force *) addr = b;
}

/* 32-bit variant of __raw_writeb(). */
static inline void __raw_writel(uint32_t b, volatile void __iomem *addr)
{
	*(volatile uint32_t __force *) addr = b;
}

/* 64-bit variant; note this one lacks the __force annotation. */
static inline void __raw_writeq(__u64 b, volatile void __iomem *addr)
{
	*(volatile __u64 *)addr = b;
}

/* In this port writeb/writew/writel/writeq alias the raw accessors. */
#define writeb __raw_writeb
#define writew __raw_writew
#define writel __raw_writel
#define writeq __raw_writeq
|
|
static inline u32 ioread32(const volatile void __iomem *addr) |
{ |
return in32((u32)addr); |
171,11 → 129,11 |
out32((u32)addr, b); |
} |
|
struct __wait_queue_head { |
spinlock_t lock; |
struct list_head task_list; |
}; |
typedef struct __wait_queue_head wait_queue_head_t; |
//struct __wait_queue_head { |
// spinlock_t lock; |
// struct list_head task_list; |
//}; |
//typedef struct __wait_queue_head wait_queue_head_t; |
|
|
/* |
186,10 → 144,29 |
#define RADEON_FENCE_JIFFIES_TIMEOUT (HZ / 2) |
/* RADEON_IB_POOL_SIZE must be a power of 2 */ |
#define RADEON_IB_POOL_SIZE 16 |
#define RADEON_DEBUGFS_MAX_NUM_FILES 32 |
#define RADEON_DEBUGFS_MAX_COMPONENTS 32 |
#define RADEONFB_CONN_LIMIT 4 |
#define RADEON_BIOS_NUM_SCRATCH 8 |
|
/* max number of rings */ |
#define RADEON_NUM_RINGS 3 |
|
/* fence seq are set to this number when signaled */ |
#define RADEON_FENCE_SIGNALED_SEQ 0LL |
|
/* internal ring indices */ |
/* r1xx+ has gfx CP ring */ |
#define RADEON_RING_TYPE_GFX_INDEX 0 |
|
/* cayman has 2 compute CP rings */ |
#define CAYMAN_RING_TYPE_CP1_INDEX 1 |
#define CAYMAN_RING_TYPE_CP2_INDEX 2 |
|
/* hardcode those limit for now */ |
#define RADEON_VA_IB_OFFSET (1 << 20) |
#define RADEON_VA_RESERVED_SIZE (8 << 20) |
#define RADEON_IB_VM_MAX_SIZE (64 << 10) |
|
/* |
* Errata workarounds. |
*/ |
206,24 → 183,8 |
/* |
* BIOS. |
*/ |
#define ATRM_BIOS_PAGE 4096 |
|
#if defined(CONFIG_VGA_SWITCHEROO) |
bool radeon_atrm_supported(struct pci_dev *pdev); |
int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len); |
#else |
static inline bool radeon_atrm_supported(struct pci_dev *pdev) |
{ |
return false; |
} |
|
static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){ |
return -EINVAL; |
} |
#endif |
bool radeon_get_bios(struct radeon_device *rdev); |
|
|
/* |
* Dummy page |
*/ |
263,12 → 224,15 |
void radeon_combios_get_power_modes(struct radeon_device *rdev); |
void radeon_atombios_get_power_modes(struct radeon_device *rdev); |
void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type); |
int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage); |
void rs690_pm_info(struct radeon_device *rdev); |
extern int rv6xx_get_temp(struct radeon_device *rdev); |
extern int rv770_get_temp(struct radeon_device *rdev); |
extern int evergreen_get_temp(struct radeon_device *rdev); |
extern int sumo_get_temp(struct radeon_device *rdev); |
extern int si_get_temp(struct radeon_device *rdev); |
extern void evergreen_tiling_fields(unsigned tiling_flags, unsigned *bankw, |
unsigned *bankh, unsigned *mtaspect, |
unsigned *tile_split); |
|
/* |
* Fences. |
275,15 → 239,12 |
*/ |
struct radeon_fence_driver { |
uint32_t scratch_reg; |
atomic_t seq; |
uint32_t last_seq; |
unsigned long last_jiffies; |
unsigned long last_timeout; |
wait_queue_head_t queue; |
rwlock_t lock; |
struct list_head created; |
struct list_head emited; |
struct list_head signaled; |
uint64_t gpu_addr; |
volatile uint32_t *cpu_addr; |
/* sync_seq is protected by ring emission lock */ |
uint64_t sync_seq[RADEON_NUM_RINGS]; |
atomic64_t last_seq; |
unsigned long last_activity; |
bool initialized; |
}; |
|
290,26 → 251,65 |
struct radeon_fence { |
struct radeon_device *rdev; |
struct kref kref; |
struct list_head list; |
/* protected by radeon_fence.lock */ |
uint32_t seq; |
bool emited; |
bool signaled; |
evhandle_t evnt; |
uint64_t seq; |
/* RB, DMA, etc. */ |
unsigned ring; |
}; |
|
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); |
int radeon_fence_driver_init(struct radeon_device *rdev); |
void radeon_fence_driver_fini(struct radeon_device *rdev); |
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence); |
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence); |
void radeon_fence_process(struct radeon_device *rdev); |
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring); |
void radeon_fence_process(struct radeon_device *rdev, int ring); |
bool radeon_fence_signaled(struct radeon_fence *fence); |
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible); |
int radeon_fence_wait_next(struct radeon_device *rdev); |
int radeon_fence_wait_last(struct radeon_device *rdev); |
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring); |
void radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring); |
int radeon_fence_wait_any(struct radeon_device *rdev, |
struct radeon_fence **fences, |
bool intr); |
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); |
void radeon_fence_unref(struct radeon_fence **fence); |
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring); |
bool radeon_fence_need_sync(struct radeon_fence *fence, int ring); |
void radeon_fence_note_sync(struct radeon_fence *fence, int ring); |
static inline struct radeon_fence *radeon_fence_later(struct radeon_fence *a, |
struct radeon_fence *b) |
{ |
if (!a) { |
return b; |
} |
|
if (!b) { |
return a; |
} |
|
BUG_ON(a->ring != b->ring); |
|
if (a->seq > b->seq) { |
return a; |
} else { |
return b; |
} |
} |
|
static inline bool radeon_fence_is_earlier(struct radeon_fence *a, |
struct radeon_fence *b) |
{ |
if (!a) { |
return false; |
} |
|
if (!b) { |
return true; |
} |
|
BUG_ON(a->ring != b->ring); |
|
return a->seq < b->seq; |
} |
|
/* |
* Tiling registers |
*/ |
330,6 → 330,24 |
bool initialized; |
}; |
|
/* bo virtual address in a specific vm */
struct radeon_bo_va {
	/* protected by bo being reserved */
	struct list_head		bo_list;	/* entry in the bo's list of mappings */
	uint64_t			soffset;	/* mapping start offset */
	uint64_t			eoffset;	/* mapping end offset */
	uint32_t			flags;		/* mapping flags */
	bool				valid;		/* page tables up to date for this mapping */
	unsigned			ref_count;

	/* protected by vm mutex */
	struct list_head		vm_list;	/* entry in the vm's mapping list */

	/* constant after initialization */
	struct radeon_vm		*vm;		/* VM this mapping lives in */
	struct radeon_bo		*bo;		/* buffer object being mapped */
};
|
struct radeon_bo { |
/* Protected by gem.mutex */ |
struct list_head list; |
345,10 → 363,15 |
u32 tiling_flags; |
u32 pitch; |
int surface_reg; |
/* list of all virtual address to which this bo |
* is associated to |
*/ |
struct list_head va; |
/* Constant after initialization */ |
struct radeon_device *rdev; |
struct drm_gem_object gem_base; |
u32 domain; |
int vmapping_count; |
}; |
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base) |
|
360,6 → 383,53 |
u32 tiling_flags; |
}; |
|
/* sub-allocation manager, it has to be protected by another lock. |
* By conception this is an helper for other part of the driver |
* like the indirect buffer or semaphore, which both have their |
* locking. |
* |
* Principe is simple, we keep a list of sub allocation in offset |
* order (first entry has offset == 0, last entry has the highest |
* offset). |
* |
* When allocating new object we first check if there is room at |
* the end total_size - (last_object_offset + last_object_size) >= |
* alloc_size. If so we allocate new object there. |
* |
* When there is not enough room at the end, we start waiting for |
* each sub object until we reach object_offset+object_size >= |
* alloc_size, this object then become the sub object we return. |
* |
* Alignment can't be bigger than page size. |
* |
* Hole are not considered for allocation to keep things simple. |
* Assumption is that there won't be hole (all object on same |
* alignment). |
*/ |
/* See the allocation-strategy comment above for how olist/hole are used. */
struct radeon_sa_manager {
	wait_queue_head_t	wq;		/* waiters for sub-allocations to be freed */
	struct radeon_bo	*bo;		/* backing buffer object */
	struct list_head	*hole;		/* position in olist where the free space starts */
	struct list_head	flist[RADEON_NUM_RINGS]; /* per-ring lists of fenced (pending-free) allocations */
	struct list_head	olist;		/* all sub-allocations in offset order */
	unsigned		size;		/* total size managed, in bytes */
	uint64_t		gpu_addr;	/* GPU address of the backing bo */
	void			*cpu_ptr;	/* CPU mapping of the backing bo */
	uint32_t		domain;		/* memory domain of the backing bo */
};
|
struct radeon_sa_bo; |
|
/* sub-allocation buffer */
struct radeon_sa_bo {
	struct list_head		olist;		/* entry in manager's offset-ordered list */
	struct list_head		flist;		/* entry in manager's per-ring fence list */
	struct radeon_sa_manager	*manager;	/* owning sub-allocation manager */
	unsigned			soffset;	/* start offset inside the manager's bo */
	unsigned			eoffset;	/* end offset inside the manager's bo */
	struct radeon_fence		*fence;		/* fence guarding reuse of this range */
};
|
/* |
* GEM objects. |
*/ |
374,9 → 444,6 |
int alignment, int initial_domain, |
bool discardable, bool kernel, |
struct drm_gem_object **obj); |
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain, |
uint64_t *gpu_addr); |
void radeon_gem_object_unpin(struct drm_gem_object *obj); |
|
int radeon_mode_dumb_create(struct drm_file *file_priv, |
struct drm_device *dev, |
389,36 → 456,47 |
uint32_t handle); |
|
/* |
* GART structures, functions & helpers |
* Semaphores. |
*/ |
struct radeon_mc; |
|
struct radeon_gart_table_ram { |
volatile uint32_t *ptr; |
/* everything here is constant */
struct radeon_semaphore {
	struct radeon_sa_bo	*sa_bo;		/* sub-allocation backing the semaphore word */
	signed			waiters;	/* signal/wait balance bookkeeping */
	uint64_t		gpu_addr;	/* GPU address of the semaphore word */
};
|
struct radeon_gart_table_vram { |
struct radeon_bo *robj; |
volatile uint32_t *ptr; |
}; |
int radeon_semaphore_create(struct radeon_device *rdev, |
struct radeon_semaphore **semaphore); |
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring, |
struct radeon_semaphore *semaphore); |
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, |
struct radeon_semaphore *semaphore); |
int radeon_semaphore_sync_rings(struct radeon_device *rdev, |
struct radeon_semaphore *semaphore, |
int signaler, int waiter); |
void radeon_semaphore_free(struct radeon_device *rdev, |
struct radeon_semaphore **semaphore, |
struct radeon_fence *fence); |
|
union radeon_gart_table { |
struct radeon_gart_table_ram ram; |
struct radeon_gart_table_vram vram; |
}; |
/* |
* GART structures, functions & helpers |
*/ |
struct radeon_mc; |
|
#define RADEON_GPU_PAGE_SIZE 4096 |
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1) |
#define RADEON_GPU_PAGE_SHIFT 12 |
#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK) |
|
struct radeon_gart { |
dma_addr_t table_addr; |
struct radeon_bo *robj; |
void *ptr; |
unsigned num_gpu_pages; |
unsigned num_cpu_pages; |
unsigned table_size; |
union radeon_gart_table table; |
struct page **pages; |
dma_addr_t *pages_addr; |
bool *ttm_alloced; |
bool ready; |
}; |
|
426,12 → 504,16 |
void radeon_gart_table_ram_free(struct radeon_device *rdev); |
int radeon_gart_table_vram_alloc(struct radeon_device *rdev); |
void radeon_gart_table_vram_free(struct radeon_device *rdev); |
int radeon_gart_table_vram_pin(struct radeon_device *rdev); |
void radeon_gart_table_vram_unpin(struct radeon_device *rdev); |
int radeon_gart_init(struct radeon_device *rdev); |
void radeon_gart_fini(struct radeon_device *rdev); |
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset, |
int pages); |
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
int pages, u32_t *pagelist); |
int pages, u32 *pagelist, |
dma_addr_t *dma_addr); |
void radeon_gart_restore(struct radeon_device *rdev); |
|
|
/* |
480,6 → 562,7 |
*/ |
/* Cached interrupt status registers for r500-class hardware. */
struct r500_irq_stat_regs {
	u32 disp_int;		/* display interrupt status */
	u32 hdmi0_status;	/* HDMI block 0 status */
};
|
struct r600_irq_stat_regs { |
488,6 → 571,8 |
u32 disp_int_cont2; |
u32 d1grph_int; |
u32 d2grph_int; |
u32 hdmi0_status; |
u32 hdmi1_status; |
}; |
|
struct evergreen_irq_stat_regs { |
503,6 → 588,12 |
u32 d4grph_int; |
u32 d5grph_int; |
u32 d6grph_int; |
u32 afmt_status1; |
u32 afmt_status2; |
u32 afmt_status3; |
u32 afmt_status4; |
u32 afmt_status5; |
u32 afmt_status6; |
}; |
|
union radeon_irq_stat_regs { |
511,77 → 602,133 |
struct evergreen_irq_stat_regs evergreen; |
}; |
|
#define RADEON_MAX_HPD_PINS 6 |
#define RADEON_MAX_CRTCS 6 |
#define RADEON_MAX_AFMT_BLOCKS 6 |
|
struct radeon_irq { |
bool installed; |
bool sw_int; |
/* FIXME: use a define max crtc rather than hardcode it */ |
bool crtc_vblank_int[6]; |
bool pflip[6]; |
spinlock_t lock; |
atomic_t ring_int[RADEON_NUM_RINGS]; |
bool crtc_vblank_int[RADEON_MAX_CRTCS]; |
atomic_t pflip[RADEON_MAX_CRTCS]; |
wait_queue_head_t vblank_queue; |
/* FIXME: use defines for max hpd/dacs */ |
bool hpd[6]; |
bool gui_idle; |
bool gui_idle_acked; |
wait_queue_head_t idle_queue; |
/* FIXME: use defines for max HDMI blocks */ |
bool hdmi[2]; |
spinlock_t sw_lock; |
int sw_refcount; |
bool hpd[RADEON_MAX_HPD_PINS]; |
bool afmt[RADEON_MAX_AFMT_BLOCKS]; |
union radeon_irq_stat_regs stat_regs; |
spinlock_t pflip_lock[6]; |
int pflip_refcount[6]; |
}; |
|
int radeon_irq_kms_init(struct radeon_device *rdev); |
void radeon_irq_kms_fini(struct radeon_device *rdev); |
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev); |
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev); |
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring); |
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring); |
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc); |
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc); |
void radeon_irq_kms_enable_afmt(struct radeon_device *rdev, int block); |
void radeon_irq_kms_disable_afmt(struct radeon_device *rdev, int block); |
void radeon_irq_kms_enable_hpd(struct radeon_device *rdev, unsigned hpd_mask); |
void radeon_irq_kms_disable_hpd(struct radeon_device *rdev, unsigned hpd_mask); |
|
/* |
* CP & ring. |
* CP & rings. |
*/ |
|
struct radeon_ib { |
struct list_head list; |
unsigned idx; |
struct radeon_sa_bo *sa_bo; |
uint32_t length_dw; |
uint64_t gpu_addr; |
uint32_t *ptr; |
int ring; |
struct radeon_fence *fence; |
uint32_t *ptr; |
uint32_t length_dw; |
bool free; |
struct radeon_vm *vm; |
bool is_const_ib; |
struct radeon_fence *sync_to[RADEON_NUM_RINGS]; |
struct radeon_semaphore *semaphore; |
}; |
|
/* |
* locking - |
* mutex protects scheduled_ibs, ready, alloc_bm |
*/ |
struct radeon_ib_pool { |
struct mutex mutex; |
struct radeon_bo *robj; |
struct list_head bogus_ib; |
struct radeon_ib ibs[RADEON_IB_POOL_SIZE]; |
bool ready; |
unsigned head_id; |
}; |
|
struct radeon_cp { |
struct radeon_ring { |
struct radeon_bo *ring_obj; |
volatile uint32_t *ring; |
unsigned rptr; |
unsigned rptr_offs; |
unsigned rptr_reg; |
unsigned rptr_save_reg; |
u64 next_rptr_gpu_addr; |
volatile u32 *next_rptr_cpu_addr; |
unsigned wptr; |
unsigned wptr_old; |
unsigned wptr_reg; |
unsigned ring_size; |
unsigned ring_free_dw; |
int count_dw; |
unsigned long last_activity; |
unsigned last_rptr; |
uint64_t gpu_addr; |
uint32_t align_mask; |
uint32_t ptr_mask; |
struct mutex mutex; |
bool ready; |
u32 ptr_reg_shift; |
u32 ptr_reg_mask; |
u32 nop; |
u32 idx; |
}; |
|
/* |
* VM |
*/ |
|
/* maximum number of VMIDs */ |
#define RADEON_NUM_VM 16 |
|
/* defines number of bits in page table versus page directory, |
* a page is 4KB so we have 12 bits offset, 9 bits in the page |
* table and the remaining 19 bits are in the page directory */ |
#define RADEON_VM_BLOCK_SIZE 9 |
|
/* number of entries in page table */ |
#define RADEON_VM_PTE_COUNT (1 << RADEON_VM_BLOCK_SIZE) |
|
/* Per-client GPU virtual address space. */
struct radeon_vm {
	struct list_head		list;		/* entry in the manager's LRU list — TODO confirm */
	struct list_head		va;		/* radeon_bo_va mappings in this VM */
	unsigned			id;		/* hardware VMID assigned to this VM */

	/* contains the page directory */
	struct radeon_sa_bo		*page_directory;
	uint64_t			pd_gpu_addr;	/* GPU address of the page directory */

	/* array of page tables, one for each page directory entry */
	struct radeon_sa_bo		**page_tables;

	struct mutex			mutex;
	/* last fence for cs using this vm */
	struct radeon_fence		*fence;
	/* last flush or NULL if we still need to flush */
	struct radeon_fence		*last_flush;
};
|
/* Global bookkeeping for all GPU virtual address spaces. */
struct radeon_vm_manager {
	struct mutex			lock;
	struct list_head		lru_vm;		/* VMs ordered for id reuse — presumably LRU; verify against users */
	struct radeon_fence		*active[RADEON_NUM_VM];	/* last fence using each VMID */
	struct radeon_sa_manager	sa_manager;	/* backing store for page directories/tables */
	uint32_t			max_pfn;	/* highest page frame number addressable */
	/* number of VMIDs */
	unsigned			nvm;
	/* vram base address for page table entry */
	u64				vram_base_offset;
	/* is vm enabled? */
	bool				enabled;
};
|
/*
 * file private structure: per-open-file driver state; currently just the
 * client's GPU virtual address space.
 */
struct radeon_fpriv {
	struct radeon_vm		vm;	/* this client's VM */
};
|
/* |
* R6xx+ IH ring |
*/ |
struct r600_ih { |
588,43 → 735,85 |
struct radeon_bo *ring_obj; |
volatile uint32_t *ring; |
unsigned rptr; |
unsigned wptr; |
unsigned wptr_old; |
unsigned ring_size; |
uint64_t gpu_addr; |
uint32_t ptr_mask; |
spinlock_t lock; |
atomic_t lock; |
bool enabled; |
}; |
|
/*
 * Per-ASIC callbacks used to emit the r600-family blit state and draw
 * commands; filled in by each generation's blit implementation.
 */
struct r600_blit_cp_primitives {
	void (*set_render_target)(struct radeon_device *rdev, int format,
				  int w, int h, u64 gpu_addr);
	void (*cp_set_surface_sync)(struct radeon_device *rdev,
				    u32 sync_type, u32 size,
				    u64 mc_addr);
	void (*set_shaders)(struct radeon_device *rdev);
	void (*set_vtx_resource)(struct radeon_device *rdev, u64 gpu_addr);
	void (*set_tex_resource)(struct radeon_device *rdev,
				 int format, int w, int h, int pitch,
				 u64 gpu_addr, u32 size);
	void (*set_scissors)(struct radeon_device *rdev, int x1, int y1,
			     int x2, int y2);
	void (*draw_auto)(struct radeon_device *rdev);
	void (*set_default_state)(struct radeon_device *rdev);
};
|
/* State for the r600-family shader-based blit (bo copy) path. */
struct r600_blit {
	struct mutex		mutex;
	struct radeon_bo	*shader_obj;	/* bo holding the blit shaders/state */
	struct r600_blit_cp_primitives primitives; /* per-ASIC emit callbacks */
	int max_dim;				/* presumably max blit dimension — verify against users */
	int ring_size_common;			/* ring dwords needed once per blit */
	int ring_size_per_loop;			/* ring dwords needed per loop iteration */
	u64 shader_gpu_addr;			/* GPU address of shader_obj */
	u32 vs_offset, ps_offset;		/* vertex/pixel shader offsets in shader_obj */
	u32 state_offset;			/* default state offset in shader_obj */
	u32 state_len;				/* default state length */
	u32 vb_used, vb_total;			/* vertex buffer usage accounting */
	struct radeon_ib *vb_ib;		/* IB holding the vertex buffer */
};
|
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib); |
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); |
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); |
/*
 * SI RLC stuff
 */
struct si_rlc {
	/* for power gating */
	struct radeon_bo	*save_restore_obj;	/* bo for RLC save/restore data */
	uint64_t		save_restore_gpu_addr;
	/* for clear state */
	struct radeon_bo	*clear_state_obj;	/* bo for RLC clear-state data */
	uint64_t		clear_state_gpu_addr;
};
|
int radeon_ib_get(struct radeon_device *rdev, int ring, |
struct radeon_ib *ib, struct radeon_vm *vm, |
unsigned size); |
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib); |
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib, |
struct radeon_ib *const_ib); |
int radeon_ib_pool_init(struct radeon_device *rdev); |
void radeon_ib_pool_fini(struct radeon_device *rdev); |
int radeon_ib_test(struct radeon_device *rdev); |
extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib); |
int radeon_ib_ring_tests(struct radeon_device *rdev); |
/* Ring access between begin & end cannot sleep */ |
void radeon_ring_free_size(struct radeon_device *rdev); |
int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw); |
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw); |
void radeon_ring_commit(struct radeon_device *rdev); |
void radeon_ring_unlock_commit(struct radeon_device *rdev); |
void radeon_ring_unlock_undo(struct radeon_device *rdev); |
int radeon_ring_test(struct radeon_device *rdev); |
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size); |
void radeon_ring_fini(struct radeon_device *rdev); |
bool radeon_ring_supports_scratch_reg(struct radeon_device *rdev, |
struct radeon_ring *ring); |
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp); |
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); |
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw); |
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp); |
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp); |
void radeon_ring_undo(struct radeon_ring *ring); |
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp); |
int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp); |
void radeon_ring_force_activity(struct radeon_device *rdev, struct radeon_ring *ring); |
void radeon_ring_lockup_update(struct radeon_ring *ring); |
bool radeon_ring_test_lockup(struct radeon_device *rdev, struct radeon_ring *ring); |
unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring, |
uint32_t **data); |
int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring, |
unsigned size, uint32_t *data); |
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size, |
unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg, |
u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop); |
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp); |
|
|
/* |
667,41 → 856,21 |
/* indices of various chunks */ |
int chunk_ib_idx; |
int chunk_relocs_idx; |
struct radeon_ib *ib; |
int chunk_flags_idx; |
int chunk_const_ib_idx; |
struct radeon_ib ib; |
struct radeon_ib const_ib; |
void *track; |
unsigned family; |
int parser_error; |
u32 cs_flags; |
u32 ring; |
s32 priority; |
}; |
|
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); |
extern int radeon_cs_finish_pages(struct radeon_cs_parser *p); |
extern u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx); |
|
|
/*
 * Fetch dword @idx of the IB chunk being parsed.
 *
 * The IB chunk is accessed through a two-slot page cache
 * (ibc->kpage[0]/kpage[1], with the cached page numbers in
 * ibc->kpage_idx[]).  On a cache miss radeon_cs_update_pages() maps the
 * needed page into one of the slots and returns the slot index.  If the
 * mapping fails, the (negative) error is latched into p->parser_error
 * and 0 is returned.
 */
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	/* dword index -> page number and byte offset within that page */
	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	/* fast path: the page is already in one of the two cache slots */
	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	/* slow path: map the page in, remembering which slot it landed in */
	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}
|
struct radeon_cs_packet { |
unsigned idx; |
unsigned type; |
739,6 → 908,7 |
}; |
|
#define RADEON_WB_SCRATCH_OFFSET 0 |
#define RADEON_WB_RING0_NEXT_RPTR 256 |
#define RADEON_WB_CP_RPTR_OFFSET 1024 |
#define RADEON_WB_CP1_RPTR_OFFSET 1280 |
#define RADEON_WB_CP2_RPTR_OFFSET 1536 |
831,6 → 1001,7 |
THERMAL_TYPE_EVERGREEN, |
THERMAL_TYPE_SUMO, |
THERMAL_TYPE_NI, |
THERMAL_TYPE_SI, |
}; |
|
struct radeon_voltage { |
868,8 → 1039,7 |
|
struct radeon_power_state { |
enum radeon_pm_state_type type; |
/* XXX: use a define for num clock modes */ |
struct radeon_pm_clock_info clock_info[8]; |
struct radeon_pm_clock_info *clock_info; |
/* number of valid clock modes in this power state */ |
int num_clock_modes; |
struct radeon_pm_clock_info *default_clock_mode; |
887,11 → 1057,12 |
|
struct radeon_pm { |
struct mutex mutex; |
/* write locked while reprogramming mclk */ |
struct rw_semaphore mclk_lock; |
u32 active_crtcs; |
int active_crtc_count; |
int req_vblank; |
bool vblank_sync; |
bool gui_idle; |
fixed20_12 max_bandwidth; |
fixed20_12 igp_sideport_mclk; |
fixed20_12 igp_system_mclk; |
939,6 → 1110,17 |
struct device *int_hwmon_dev; |
}; |
|
int radeon_pm_get_type_index(struct radeon_device *rdev, |
enum radeon_pm_state_type ps_type, |
int instance); |
|
/* Snapshot of the r600 audio stream parameters (for HDMI audio). */
struct r600_audio {
	int channels;		/* number of audio channels */
	int rate;		/* sample rate */
	int bits_per_sample;	/* sample width */
	u8 status_bits;		/* IEC status bits */
	u8 category_code;	/* IEC category code */
};
/* |
* ASIC specific functions. |
*/ |
948,37 → 1130,108 |
int (*resume)(struct radeon_device *rdev); |
int (*suspend)(struct radeon_device *rdev); |
void (*vga_set_state)(struct radeon_device *rdev, bool state); |
bool (*gpu_is_lockup)(struct radeon_device *rdev); |
int (*asic_reset)(struct radeon_device *rdev); |
void (*gart_tlb_flush)(struct radeon_device *rdev); |
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr); |
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); |
void (*cp_fini)(struct radeon_device *rdev); |
void (*cp_disable)(struct radeon_device *rdev); |
void (*cp_commit)(struct radeon_device *rdev); |
void (*ring_start)(struct radeon_device *rdev); |
int (*ring_test)(struct radeon_device *rdev); |
void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); |
int (*irq_set)(struct radeon_device *rdev); |
int (*irq_process)(struct radeon_device *rdev); |
/* ioctl hw specific callback. Some hw might want to perform special |
* operation on specific ioctl. For instance on wait idle some hw |
* might want to perform and HDP flush through MMIO as it seems that |
* some R6XX/R7XX hw doesn't take HDP flush into account if programmed |
* through ring. |
*/ |
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); |
/* check if 3D engine is idle */ |
bool (*gui_idle)(struct radeon_device *rdev); |
/* wait for mc_idle */ |
int (*mc_wait_for_idle)(struct radeon_device *rdev); |
/* gart */ |
struct { |
void (*tlb_flush)(struct radeon_device *rdev); |
int (*set_page)(struct radeon_device *rdev, int i, uint64_t addr); |
} gart; |
struct { |
int (*init)(struct radeon_device *rdev); |
void (*fini)(struct radeon_device *rdev); |
|
u32 pt_ring_index; |
void (*set_page)(struct radeon_device *rdev, uint64_t pe, |
uint64_t addr, unsigned count, |
uint32_t incr, uint32_t flags); |
} vm; |
/* ring specific callbacks */ |
struct { |
void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); |
int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib); |
void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence); |
void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, |
struct radeon_semaphore *semaphore, bool emit_wait); |
int (*cs_parse)(struct radeon_cs_parser *p); |
void (*ring_start)(struct radeon_device *rdev, struct radeon_ring *cp); |
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); |
int (*ib_test)(struct radeon_device *rdev, struct radeon_ring *cp); |
bool (*is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp); |
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); |
} ring[RADEON_NUM_RINGS]; |
/* irqs */ |
struct { |
int (*set)(struct radeon_device *rdev); |
int (*process)(struct radeon_device *rdev); |
} irq; |
/* displays */ |
struct { |
/* display watermarks */ |
void (*bandwidth_update)(struct radeon_device *rdev); |
/* get frame count */ |
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); |
void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence); |
int (*cs_parse)(struct radeon_cs_parser *p); |
int (*copy_blit)(struct radeon_device *rdev, |
/* wait for vblank */ |
void (*wait_for_vblank)(struct radeon_device *rdev, int crtc); |
/* set backlight level */ |
void (*set_backlight_level)(struct radeon_encoder *radeon_encoder, u8 level); |
/* get backlight level */ |
u8 (*get_backlight_level)(struct radeon_encoder *radeon_encoder); |
} display; |
/* copy functions for bo handling */ |
struct { |
int (*blit)(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_pages, |
struct radeon_fence *fence); |
int (*copy_dma)(struct radeon_device *rdev, |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
u32 blit_ring_index; |
int (*dma)(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_pages, |
struct radeon_fence *fence); |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
u32 dma_ring_index; |
/* method used for bo copy */ |
int (*copy)(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
unsigned num_pages, |
struct radeon_fence *fence); |
unsigned num_gpu_pages, |
struct radeon_fence **fence); |
/* ring used for bo copies */ |
u32 copy_ring_index; |
} copy; |
/* surfaces */ |
struct { |
int (*set_reg)(struct radeon_device *rdev, int reg, |
uint32_t tiling_flags, uint32_t pitch, |
uint32_t offset, uint32_t obj_size); |
void (*clear_reg)(struct radeon_device *rdev, int reg); |
} surface; |
/* hotplug detect */ |
struct { |
void (*init)(struct radeon_device *rdev); |
void (*fini)(struct radeon_device *rdev); |
bool (*sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
void (*set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
} hpd; |
/* power management */ |
struct { |
void (*misc)(struct radeon_device *rdev); |
void (*prepare)(struct radeon_device *rdev); |
void (*finish)(struct radeon_device *rdev); |
void (*init_profile)(struct radeon_device *rdev); |
void (*get_dynpm_state)(struct radeon_device *rdev); |
uint32_t (*get_engine_clock)(struct radeon_device *rdev); |
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock); |
uint32_t (*get_memory_clock)(struct radeon_device *rdev); |
986,48 → 1239,22 |
int (*get_pcie_lanes)(struct radeon_device *rdev); |
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); |
void (*set_clock_gating)(struct radeon_device *rdev, int enable); |
int (*set_surface_reg)(struct radeon_device *rdev, int reg, |
uint32_t tiling_flags, uint32_t pitch, |
uint32_t offset, uint32_t obj_size); |
void (*clear_surface_reg)(struct radeon_device *rdev, int reg); |
void (*bandwidth_update)(struct radeon_device *rdev); |
void (*hpd_init)(struct radeon_device *rdev); |
void (*hpd_fini)(struct radeon_device *rdev); |
bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd); |
/* ioctl hw specific callback. Some hw might want to perform special |
* operation on specific ioctl. For instance on wait idle some hw |
* might want to perform and HDP flush through MMIO as it seems that |
* some R6XX/R7XX hw doesn't take HDP flush into account if programmed |
* through ring. |
*/ |
void (*ioctl_wait_idle)(struct radeon_device *rdev, struct radeon_bo *bo); |
bool (*gui_idle)(struct radeon_device *rdev); |
/* power management */ |
void (*pm_misc)(struct radeon_device *rdev); |
void (*pm_prepare)(struct radeon_device *rdev); |
void (*pm_finish)(struct radeon_device *rdev); |
void (*pm_init_profile)(struct radeon_device *rdev); |
void (*pm_get_dynpm_state)(struct radeon_device *rdev); |
} pm; |
/* pageflipping */ |
struct { |
void (*pre_page_flip)(struct radeon_device *rdev, int crtc); |
u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base); |
void (*post_page_flip)(struct radeon_device *rdev, int crtc); |
} pflip; |
}; |
|
/* |
* Asic structures |
*/ |
/* Progress snapshot used to detect a stuck CP (GPU lockup) on r100-class parts. */
struct r100_gpu_lockup {
	unsigned long	last_jiffies;	/* time of the last observed CP progress */
	u32		last_cp_rptr;	/* CP read pointer at that time */
};
|
/* r100-family ASIC configuration. */
struct r100_asic {
	const unsigned		*reg_safe_bm;		/* bitmap of registers safe for userspace CS */
	unsigned		reg_safe_bm_size;
	u32			hdp_cntl;		/* saved HDP_CNTL value */
	struct r100_gpu_lockup	lockup;			/* lockup-detection state */
};
|
struct r300_asic { |
1035,7 → 1262,6 |
unsigned reg_safe_bm_size; |
u32 resync_scratch; |
u32 hdp_cntl; |
struct r100_gpu_lockup lockup; |
}; |
|
struct r600_asic { |
1057,7 → 1283,6 |
unsigned tiling_group_size; |
unsigned tile_config; |
unsigned backend_map; |
struct r100_gpu_lockup lockup; |
}; |
|
struct rv770_asic { |
1083,7 → 1308,6 |
unsigned tiling_group_size; |
unsigned tile_config; |
unsigned backend_map; |
struct r100_gpu_lockup lockup; |
}; |
|
struct evergreen_asic { |
1110,7 → 1334,6 |
unsigned tiling_group_size; |
unsigned tile_config; |
unsigned backend_map; |
struct r100_gpu_lockup lockup; |
}; |
|
struct cayman_asic { |
1149,9 → 1372,37 |
unsigned multi_gpu_tile_size; |
|
unsigned tile_config; |
struct r100_gpu_lockup lockup; |
}; |
|
/*
 * Per-ASIC configuration for the SI (Southern Islands) family.
 * Values are presumably filled at init from chip limits and
 * harvest/disable fuses — verify against the SI init code.
 */
struct si_asic {
	/* hardware maxima */
	unsigned	max_shader_engines;
	unsigned	max_tile_pipes;
	unsigned	max_cu_per_sh;
	unsigned	max_sh_per_se;
	unsigned	max_backends_per_se;
	unsigned	max_texture_channel_caches;
	unsigned	max_gprs;
	unsigned	max_gs_threads;
	unsigned	max_hw_contexts;
	unsigned	sc_prim_fifo_size_frontend;
	unsigned	sc_prim_fifo_size_backend;
	unsigned	sc_hiz_tile_fifo_size;
	unsigned	sc_earlyz_tile_fifo_size;

	/* effective configuration of this particular chip */
	unsigned	num_tile_pipes;
	unsigned	num_backends_per_se;
	unsigned	backend_disable_mask_per_asic;
	unsigned	backend_map;
	unsigned	num_texture_channel_caches;
	unsigned	mem_max_burst_length_bytes;
	unsigned	mem_row_size_in_kb;
	unsigned	shader_engine_tile_size;
	unsigned	num_gpus;
	unsigned	multi_gpu_tile_size;

	unsigned	tile_config;	/* packed tiling configuration word */
};
|
union radeon_asic_config { |
struct r300_asic r300; |
struct r100_asic r100; |
1159,6 → 1410,7 |
struct rv770_asic rv770; |
struct evergreen_asic evergreen; |
struct cayman_asic cayman; |
struct si_asic si; |
}; |
|
/* |
1169,12 → 1421,14 |
|
|
|
/* VRAM scratch page for HDP bug */ |
struct r700_vram_scratch { |
/* VRAM scratch page for HDP bug, default vram page */ |
struct r600_vram_scratch { |
struct radeon_bo *robj; |
volatile uint32_t *ptr; |
u64 gpu_addr; |
}; |
|
|
/* |
* Core structure, functions and helpers. |
*/ |
1185,6 → 1439,7 |
struct device *dev; |
struct drm_device *ddev; |
struct pci_dev *pdev; |
struct rw_semaphore exclusive_lock; |
/* ASIC */ |
union radeon_asic_config config; |
enum radeon_family family; |
1202,7 → 1457,7 |
/* Register mmio */ |
resource_size_t rmmio_base; |
resource_size_t rmmio_size; |
void *rmmio; |
void __iomem *rmmio; |
radeon_rreg_t mc_rreg; |
radeon_wreg_t mc_wreg; |
radeon_rreg_t pll_rreg; |
1219,21 → 1474,19 |
struct radeon_mode_info mode_info; |
struct radeon_scratch scratch; |
struct radeon_mman mman; |
struct radeon_fence_driver fence_drv; |
struct radeon_cp cp; |
/* cayman compute rings */ |
struct radeon_cp cp1; |
struct radeon_cp cp2; |
struct radeon_ib_pool ib_pool; |
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; |
wait_queue_head_t fence_queue; |
struct mutex ring_lock; |
struct radeon_ring ring[RADEON_NUM_RINGS]; |
bool ib_pool_ready; |
struct radeon_sa_manager ring_tmp_bo; |
struct radeon_irq irq; |
struct radeon_asic *asic; |
struct radeon_gem gem; |
struct radeon_pm pm; |
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; |
struct mutex cs_mutex; |
struct radeon_wb wb; |
struct radeon_dummy_page dummy_page; |
bool gpu_lockup; |
bool shutdown; |
bool suspend; |
bool need_dma32; |
1243,28 → 1496,33 |
const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
const struct firmware *rlc_fw; /* r6/700 RLC firmware */ |
const struct firmware *mc_fw; /* NI MC firmware */ |
const struct firmware *ce_fw; /* SI CE firmware */ |
struct r600_blit r600_blit; |
struct r600_blit r600_video; |
struct r700_vram_scratch vram_scratch; |
struct r600_vram_scratch vram_scratch; |
int msi_enabled; /* msi enabled */ |
struct r600_ih ih; /* r6/700 interrupt ring */ |
struct si_rlc rlc; |
// struct work_struct hotplug_work; |
// struct work_struct audio_work; |
int num_crtc; /* number of crtcs */ |
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ |
struct mutex vram_mutex; |
|
/* audio stuff */ |
bool audio_enabled; |
// struct timer_list audio_timer; |
int audio_channels; |
int audio_rate; |
int audio_bits_per_sample; |
uint8_t audio_status_bits; |
uint8_t audio_category_code; |
|
|
// struct r600_audio audio_status; /* audio stuff */ |
// struct notifier_block acpi_nb; |
/* only one userspace can use Hyperz features or CMASK at a time */ |
// struct drm_file *hyperz_filp; |
// struct drm_file *cmask_filp; |
/* i2c buses */ |
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS]; |
/* debugfs */ |
// struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS]; |
unsigned debugfs_count; |
/* virtual memory */ |
struct radeon_vm_manager vm_manager; |
struct mutex gpu_clock_mutex; |
/* ACPI interface */ |
// struct radeon_atif atif; |
// struct radeon_atcs atcs; |
}; |
|
int radeon_device_init(struct radeon_device *rdev, |
1274,46 → 1532,11 |
void radeon_device_fini(struct radeon_device *rdev); |
int radeon_gpu_wait_for_idle(struct radeon_device *rdev); |
|
/*
 * Read a 32-bit MMIO register. Registers inside the mapped window are
 * read directly; registers past it go through the MM_INDEX/MM_DATA
 * indirect access pair.
 */
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
	void __iomem *mmio = (void __iomem *)rdev->rmmio;

	if (reg >= rdev->rmmio_size) {
		writel(reg, mmio + RADEON_MM_INDEX);
		return readl(mmio + RADEON_MM_DATA);
	}
	return readl(mmio + reg);
}
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
u32 r100_io_rreg(struct radeon_device *rdev, u32 reg); |
void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
|
/*
 * Write a 32-bit MMIO register. Registers inside the mapped window are
 * written directly; registers past it go through the MM_INDEX/MM_DATA
 * indirect access pair.
 */
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	void __iomem *mmio = (void __iomem *)rdev->rmmio;

	if (reg >= rdev->rmmio_size) {
		writel(reg, mmio + RADEON_MM_INDEX);
		writel(v, mmio + RADEON_MM_DATA);
	} else {
		writel(v, mmio + reg);
	}
}
|
/*
 * Read a register through PCI I/O space. Offsets beyond the I/O window
 * are reached indirectly via the MM_INDEX/MM_DATA pair.
 */
static inline u32 r100_io_rreg(struct radeon_device *rdev, u32 reg)
{
	if (reg >= rdev->rio_mem_size) {
		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
		return ioread32(rdev->rio_mem + RADEON_MM_DATA);
	}
	return ioread32(rdev->rio_mem + reg);
}
|
/*
 * Write a register through PCI I/O space. Offsets beyond the I/O window
 * are reached indirectly via the MM_INDEX/MM_DATA pair.
 */
static inline void r100_io_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	if (reg >= rdev->rio_mem_size) {
		iowrite32(reg, rdev->rio_mem + RADEON_MM_INDEX);
		iowrite32(v, rdev->rio_mem + RADEON_MM_DATA);
	} else {
		iowrite32(v, rdev->rio_mem + reg);
	}
}
|
/* |
* Cast helper |
*/ |
1322,10 → 1545,10 |
/* |
* Registers read & write functions. |
*/ |
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) |
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) |
#define RREG16(reg) readw(((void __iomem *)rdev->rmmio) + (reg)) |
#define WREG16(reg, v) writew(v, ((void __iomem *)rdev->rmmio) + (reg)) |
#define RREG8(reg) readb((rdev->rmmio) + (reg)) |
#define WREG8(reg, v) writeb(v, (rdev->rmmio) + (reg)) |
#define RREG16(reg) readw((rdev->rmmio) + (reg)) |
#define WREG16(reg, v) writew(v, (rdev->rmmio) + (reg)) |
#define RREG32(reg) r100_mm_rreg(rdev, (reg)) |
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", r100_mm_rreg(rdev, (reg))) |
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) |
1417,6 → 1640,9 |
#define ASIC_IS_DCE41(rdev) ((rdev->family >= CHIP_PALM) && \ |
(rdev->flags & RADEON_IS_IGP)) |
#define ASIC_IS_DCE5(rdev) ((rdev->family >= CHIP_BARTS)) |
#define ASIC_IS_DCE6(rdev) ((rdev->family >= CHIP_ARUBA)) |
#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \ |
(rdev->flags & RADEON_IS_IGP)) |
|
/* |
* BIOS helpers. |
1434,20 → 1660,19 |
/* |
* RING helpers. |
*/ |
/*
 * Defect fixed: the merged diff interleaved the old (rdev->cp based) and
 * new (struct radeon_ring based) versions of radeon_ring_write — two
 * function signatures were stacked, the preprocessor conditionals were
 * unbalanced (two #if, one #endif), and statements dangled outside any
 * body. Reconstructed as the coherent ring-based form, with the debug
 * build falling back to an out-of-line definition.
 */
#if DRM_DEBUG_CODE == 0
/*
 * Append one dword to the ring: store v at the write pointer, advance
 * and wrap the pointer through ptr_mask, and update the free-space
 * bookkeeping counters.
 */
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
|
|
/* |
* ASICs macro. |
*/ |
1455,53 → 1680,64 |
#define radeon_fini(rdev) (rdev)->asic->fini((rdev)) |
#define radeon_resume(rdev) (rdev)->asic->resume((rdev)) |
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) |
#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) |
#define radeon_cs_parse(rdev, r, p) (rdev)->asic->ring[(r)].cs_parse((p)) |
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev)) |
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev)) |
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) |
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p)) |
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) |
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) |
#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) |
#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) |
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) |
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) |
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) |
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) |
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) |
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) |
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f)) |
#define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev)) |
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) |
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev)) |
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e)) |
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev)) |
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) |
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) |
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) |
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) |
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) |
#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev)) |
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev)) |
#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd)) |
#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd)) |
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev)) |
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart.set_page((rdev), (i), (p)) |
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev)) |
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev)) |
#define radeon_asic_vm_set_page(rdev, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (pe), (addr), (count), (incr), (flags))) |
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)].ring_start((rdev), (cp)) |
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)].ring_test((rdev), (cp)) |
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)].ib_test((rdev), (cp)) |
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib)) |
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib)) |
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)].is_lockup((rdev), (cp)) |
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)].vm_flush((rdev), (r), (vm)) |
#define radeon_irq_set(rdev) (rdev)->asic->irq.set((rdev)) |
#define radeon_irq_process(rdev) (rdev)->asic->irq.process((rdev)) |
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->display.get_vblank_counter((rdev), (crtc)) |
#define radeon_set_backlight_level(rdev, e, l) (rdev)->asic->display.set_backlight_level((e), (l)) |
#define radeon_get_backlight_level(rdev, e) (rdev)->asic->display.get_backlight_level((e)) |
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence)) |
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait)) |
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f)) |
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f)) |
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f)) |
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index |
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index |
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index |
#define radeon_get_engine_clock(rdev) (rdev)->asic->pm.get_engine_clock((rdev)) |
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->pm.set_engine_clock((rdev), (e)) |
#define radeon_get_memory_clock(rdev) (rdev)->asic->pm.get_memory_clock((rdev)) |
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->pm.set_memory_clock((rdev), (e)) |
#define radeon_get_pcie_lanes(rdev) (rdev)->asic->pm.get_pcie_lanes((rdev)) |
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->pm.set_pcie_lanes((rdev), (l)) |
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->pm.set_clock_gating((rdev), (e)) |
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->surface.set_reg((rdev), (r), (f), (p), (o), (s))) |
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->surface.clear_reg((rdev), (r))) |
#define radeon_bandwidth_update(rdev) (rdev)->asic->display.bandwidth_update((rdev)) |
#define radeon_hpd_init(rdev) (rdev)->asic->hpd.init((rdev)) |
#define radeon_hpd_fini(rdev) (rdev)->asic->hpd.fini((rdev)) |
#define radeon_hpd_sense(rdev, h) (rdev)->asic->hpd.sense((rdev), (h)) |
#define radeon_hpd_set_polarity(rdev, h) (rdev)->asic->hpd.set_polarity((rdev), (h)) |
#define radeon_gui_idle(rdev) (rdev)->asic->gui_idle((rdev)) |
#define radeon_pm_misc(rdev) (rdev)->asic->pm_misc((rdev)) |
#define radeon_pm_prepare(rdev) (rdev)->asic->pm_prepare((rdev)) |
#define radeon_pm_finish(rdev) (rdev)->asic->pm_finish((rdev)) |
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm_init_profile((rdev)) |
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm_get_dynpm_state((rdev)) |
#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pre_page_flip((rdev), (crtc)) |
#define radeon_page_flip(rdev, crtc, base) rdev->asic->page_flip((rdev), (crtc), (base)) |
#define radeon_post_page_flip(rdev, crtc) rdev->asic->post_page_flip((rdev), (crtc)) |
#define radeon_pm_misc(rdev) (rdev)->asic->pm.misc((rdev)) |
#define radeon_pm_prepare(rdev) (rdev)->asic->pm.prepare((rdev)) |
#define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) |
#define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) |
#define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) |
#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc)) |
#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) |
#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) |
#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) |
#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) |
|
/* Common functions */ |
/* AGP */ |
extern int radeon_gpu_reset(struct radeon_device *rdev); |
extern void radeon_agp_disable(struct radeon_device *rdev); |
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); |
extern void radeon_gart_restore(struct radeon_device *rdev); |
extern int radeon_modeset_init(struct radeon_device *rdev); |
extern void radeon_modeset_fini(struct radeon_device *rdev); |
extern bool radeon_card_posted(struct radeon_device *rdev); |
1525,12 → 1761,91 |
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); |
|
/* |
* vm |
*/ |
int radeon_vm_manager_init(struct radeon_device *rdev); |
void radeon_vm_manager_fini(struct radeon_device *rdev); |
void radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); |
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); |
int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm); |
void radeon_vm_add_to_lru(struct radeon_device *rdev, struct radeon_vm *vm); |
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, |
struct radeon_vm *vm, int ring); |
void radeon_vm_fence(struct radeon_device *rdev, |
struct radeon_vm *vm, |
struct radeon_fence *fence); |
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr); |
int radeon_vm_bo_update_pte(struct radeon_device *rdev, |
struct radeon_vm *vm, |
struct radeon_bo *bo, |
struct ttm_mem_reg *mem); |
void radeon_vm_bo_invalidate(struct radeon_device *rdev, |
struct radeon_bo *bo); |
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm, |
struct radeon_bo *bo); |
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev, |
struct radeon_vm *vm, |
struct radeon_bo *bo); |
int radeon_vm_bo_set_addr(struct radeon_device *rdev, |
struct radeon_bo_va *bo_va, |
uint64_t offset, |
uint32_t flags); |
int radeon_vm_bo_rmv(struct radeon_device *rdev, |
struct radeon_bo_va *bo_va); |
|
/* audio */ |
void r600_audio_update_hdmi(struct work_struct *work); |
|
/* |
* R600 vram scratch functions |
*/ |
int r600_vram_scratch_init(struct radeon_device *rdev); |
void r600_vram_scratch_fini(struct radeon_device *rdev); |
|
/* |
* r600 cs checking helper |
*/ |
unsigned r600_mip_minify(unsigned size, unsigned level); |
bool r600_fmt_is_valid_color(u32 format); |
bool r600_fmt_is_valid_texture(u32 format, enum radeon_family family); |
int r600_fmt_get_blocksize(u32 format); |
int r600_fmt_get_nblocksx(u32 format, u32 w); |
int r600_fmt_get_nblocksy(u32 format, u32 h); |
|
/* |
* r600 functions used by radeon_encoder.c |
*/ |
/*
 * HDMI Audio Clock Regeneration parameters: the N/CTS value pairs for
 * the three standard audio sample rates at a given clock (see the HDMI
 * specification's ACR packet description).
 */
struct radeon_hdmi_acr {
	u32 clock;		/* clock this entry applies to — presumably pixel clock in kHz; verify */

	int n_32khz;		/* N value for 32 kHz audio */
	int cts_32khz;		/* CTS value for 32 kHz audio */

	int n_44_1khz;		/* N value for 44.1 kHz audio */
	int cts_44_1khz;	/* CTS value for 44.1 kHz audio */

	int n_48khz;		/* N value for 48 kHz audio */
	int cts_48khz;		/* CTS value for 48 kHz audio */

};
|
extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock); |
|
extern void r600_hdmi_enable(struct drm_encoder *encoder); |
extern void r600_hdmi_disable(struct drm_encoder *encoder); |
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
extern u32 r6xx_remap_render_backend(struct radeon_device *rdev, |
u32 tiling_pipe_num, |
u32 max_rb_num, |
u32 total_max_rb_num, |
u32 enabled_rb_mask); |
|
/* |
* evergreen functions used by radeon_encoder.c |
*/ |
|
extern void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); |
|
extern int ni_init_microcode(struct radeon_device *rdev); |
extern int ni_mc_load_microcode(struct radeon_device *rdev); |
|
1537,8 → 1852,10 |
/* radeon_acpi.c */
#if defined(CONFIG_ACPI)
extern int radeon_acpi_init(struct radeon_device *rdev);
extern void radeon_acpi_fini(struct radeon_device *rdev);
#else
/* Without ACPI support: init succeeds as a no-op, fini does nothing. */
static inline int radeon_acpi_init(struct radeon_device *rdev) { return 0; }
static inline void radeon_acpi_fini(struct radeon_device *rdev) { }
#endif
|
#include "radeon_object.h" |
1555,22 → 1872,4 |
|
|
|
/* Minimal stand-in for the Linux workqueue API used by this port. */
struct work_struct;
typedef void (*work_func_t)(struct work_struct *work);

/*
 * The first word is the work queue pointer and the flags rolled into
 * one
 */
#define work_data_bits(work) ((unsigned long *)(&(work)->data))

struct work_struct {
	atomic_long_t data;	/* workqueue pointer + flag bits packed together (see above) */
#define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
#define WORK_STRUCT_FLAG_MASK (3UL)	/* low bits of data reserved for flags */
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)	/* remaining bits hold the wq pointer */
	struct list_head entry;	/* links this item into a work list */
	work_func_t func;	/* callback invoked when the item runs */
};
|
#endif |