Subversion Repositories: KolibriOS

Compare Revisions

Rev 5179 → Rev 5271

/drivers/video/drm/radeon/radeon.h
60,12 → 60,13
* are considered as fatal)
*/
 
#include <asm/atomic.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/interval_tree.h>
#include <asm/div64.h>
#include <linux/fence.h>
 
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
73,11 → 74,11
//#include <ttm/ttm_module.h>
#include <ttm/ttm_execbuf_util.h>
 
#include <drm/drm_gem.h>
 
#include <linux/irqreturn.h>
#include <pci.h>
#include <linux/pci.h>
 
#include <errno-base.h>
 
#include "radeon_family.h"
#include "radeon_mode.h"
#include "radeon_reg.h"
154,9 → 155,6
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
 
/* fence seq are set to this number when signaled */
#define RADEON_FENCE_SIGNALED_SEQ 0LL
 
/* internal ring indices */
/* r1xx+ has gfx CP ring */
#define RADEON_RING_TYPE_GFX_INDEX 0
183,9 → 181,6
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
 
/* number of hw syncs before falling back on blocking */
#define RADEON_NUM_SYNCS 4
 
/* hardcode those limits for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
384,6 → 379,7
* Fences.
*/
struct radeon_fence_driver {
struct radeon_device *rdev;
uint32_t scratch_reg;
uint64_t gpu_addr;
volatile uint32_t *cpu_addr;
390,22 → 386,26
/* sync_seq is protected by ring emission lock */
uint64_t sync_seq[RADEON_NUM_RINGS];
atomic64_t last_seq;
bool initialized;
bool initialized, delayed_irq;
struct delayed_work lockup_work;
};
 
struct radeon_fence {
struct fence base;
 
struct radeon_device *rdev;
struct kref kref;
/* protected by radeon_fence.lock */
uint64_t seq;
/* RB, DMA, etc. */
unsigned ring;
bool is_vm_update;
 
wait_queue_t fence_wake;
};
 
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
void radeon_fence_driver_force_completion(struct radeon_device *rdev);
void radeon_fence_driver_force_completion(struct radeon_device *rdev, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
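
A minimal sketch of the emit/poll cycle these declarations support (assuming the caller holds the ring emission lock while emitting, as the driver's radeon_ring_lock()/radeon_ring_unlock_commit() helpers arrange, and that radeon_fence_unref() is declared elsewhere in this header):

	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
	if (r)
		return r;
	/* ... commit the ring, then later poll for completion: */
	if (radeon_fence_signaled(fence))
		radeon_fence_unref(&fence);	/* drop our reference */
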
481,6 → 481,15
#endif
};
 
struct radeon_bo_list {
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
unsigned prefered_domains;
unsigned allowed_domains;
uint32_t tiling_flags;
};
 
/* bo virtual address in a specific vm */
struct radeon_bo_va {
/* protected by bo being reserved */
487,6 → 496,7
struct list_head bo_list;
uint32_t flags;
uint64_t addr;
struct radeon_fence *last_pt_update;
unsigned ref_count;
 
/* protected by vm mutex */
503,7 → 513,7
struct list_head list;
/* Protected by tbo.reserved */
u32 initial_domain;
u32 placements[3];
struct ttm_place placements[4];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
522,6 → 532,8
struct drm_gem_object gem_base;
 
pid_t pid;
 
struct radeon_mn *mn;
};
#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
 
604,7 → 616,6
struct radeon_sa_bo *sa_bo;
signed waiters;
uint64_t gpu_addr;
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
};
 
int radeon_semaphore_create(struct radeon_device *rdev,
613,16 → 624,33
struct radeon_semaphore *semaphore);
bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
struct radeon_semaphore *semaphore);
void radeon_semaphore_sync_to(struct radeon_semaphore *semaphore,
struct radeon_fence *fence);
int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int waiting_ring);
void radeon_semaphore_free(struct radeon_device *rdev,
struct radeon_semaphore **semaphore,
struct radeon_fence *fence);
 
/*
* Synchronization
*/
struct radeon_sync {
struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
struct radeon_fence *sync_to[RADEON_NUM_RINGS];
struct radeon_fence *last_vm_update;
};
 
void radeon_sync_create(struct radeon_sync *sync);
void radeon_sync_fence(struct radeon_sync *sync,
struct radeon_fence *fence);
int radeon_sync_resv(struct radeon_device *rdev,
struct radeon_sync *sync,
struct reservation_object *resv,
bool shared);
int radeon_sync_rings(struct radeon_device *rdev,
struct radeon_sync *sync,
int waiting_ring);
void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
struct radeon_fence *fence);
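
A hedged usage sketch of the new radeon_sync API, which takes over the per-ring sync_to[] bookkeeping from radeon_semaphore (dep_fence, job_fence and ring_index are placeholders, not identifiers from this header):

	struct radeon_sync sync;
	int r;

	radeon_sync_create(&sync);
	radeon_sync_fence(&sync, dep_fence);		/* record a dependency */
	r = radeon_sync_rings(rdev, &sync, ring_index);	/* emit semaphore waits */
	if (r)
		goto error;
	/* ... emit the commands that depend on dep_fence ... */
	radeon_sync_free(rdev, &sync, job_fence);	/* released once job_fence signals */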
 
/*
* GART structures, functions & helpers
*/
struct radeon_mc;
722,6 → 750,10
 
int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
phys_addr_t *aperture_base,
size_t *aperture_size,
size_t *start_offset);
 
/*
* IRQS.
801,6 → 833,7
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
bool radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring);
void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
822,7 → 855,7
struct radeon_fence *fence;
struct radeon_vm *vm;
bool is_const_ib;
struct radeon_semaphore *semaphore;
struct radeon_sync sync;
};
 
struct radeon_ring {
899,10 → 932,23
uint64_t addr;
};
 
struct radeon_vm_id {
unsigned id;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct radeon_fence *flushed_updates;
/* last use of vmid */
struct radeon_fence *last_id_use;
};
 
struct radeon_vm {
struct mutex mutex;
 
struct rb_root va;
unsigned id;
 
/* protecting invalidated and freed */
spinlock_t status_lock;
 
/* BOs moved, but not yet updated in the PT */
struct list_head invalidated;
 
911,7 → 957,6
 
/* contains the page directory */
struct radeon_bo *page_directory;
uint64_t pd_gpu_addr;
unsigned max_pde_used;
 
/* array of page tables, one for each page directory entry */
919,13 → 964,8
 
struct radeon_bo_va *ib_bo_va;
 
struct mutex mutex;
/* last fence for cs using this vm */
struct radeon_fence *fence;
/* last flush or NULL if we still need to flush */
struct radeon_fence *last_flush;
/* last use of vmid */
struct radeon_fence *last_id_use;
/* for id and flush management per ring */
struct radeon_vm_id ids[RADEON_NUM_RINGS];
};
 
struct radeon_vm_manager {
1033,19 → 1073,7
/*
* CS.
*/
struct radeon_cs_reloc {
struct drm_gem_object *gobj;
struct radeon_bo *robj;
struct ttm_validate_buffer tv;
uint64_t gpu_offset;
unsigned prefered_domains;
unsigned allowed_domains;
uint32_t tiling_flags;
uint32_t handle;
};
 
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
uint32_t *kdata;
void __user *user_ptr;
1063,16 → 1091,15
unsigned idx;
/* relocations */
unsigned nrelocs;
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct radeon_cs_reloc *vm_bos;
struct radeon_bo_list *relocs;
struct radeon_bo_list *vm_bos;
struct list_head validated;
unsigned dma_reloc_idx;
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
int chunk_flags_idx;
int chunk_const_ib_idx;
struct radeon_cs_chunk *chunk_ib;
struct radeon_cs_chunk *chunk_relocs;
struct radeon_cs_chunk *chunk_flags;
struct radeon_cs_chunk *chunk_const_ib;
struct radeon_ib ib;
struct radeon_ib const_ib;
void *track;
1086,7 → 1113,7
 
static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
struct radeon_cs_chunk *ibc = p->chunk_ib;
 
if (ibc->kdata)
return ibc->kdata[idx];
1498,6 → 1525,10
u8 t_hyst;
u32 cycle_delay;
u16 t_max;
u8 control_mode;
u16 default_max_fan_pwm;
u16 default_fan_output_sensitivity;
u16 fan_output_sensitivity;
bool ucode_fan_control;
};
 
1631,6 → 1662,11
/* internal thermal controller on rv6xx+ */
enum radeon_int_thermal_type int_thermal_type;
struct device *int_hwmon_dev;
/* fan control parameters */
bool no_fan;
u8 fan_pulses_per_revolution;
u8 fan_min_rpm;
u8 fan_max_rpm;
/* dpm */
bool dpm_enabled;
struct radeon_dpm dpm;
1665,7 → 1701,8
uint32_t handle, struct radeon_fence **fence);
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
uint32_t handle, struct radeon_fence **fence);
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo);
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo,
uint32_t allowed_domains);
void radeon_uvd_free_handles(struct radeon_device *rdev,
struct drm_file *filp);
int radeon_uvd_cs_parse(struct radeon_cs_parser *parser);
1754,6 → 1791,11
struct radeon_ring *cpB);
void radeon_test_syncing(struct radeon_device *rdev);
 
/*
* MMU Notifier
*/
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr);
void radeon_mn_unregister(struct radeon_bo *bo);
 
/*
* Debugfs
1787,7 → 1829,8
void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
struct radeon_semaphore *semaphore, bool emit_wait);
void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr);
 
/* testing functions */
int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
1868,24 → 1911,24
} display;
/* copy functions for bo handling */
struct {
int (*blit)(struct radeon_device *rdev,
struct radeon_fence *(*blit)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
struct reservation_object *resv);
u32 blit_ring_index;
int (*dma)(struct radeon_device *rdev,
struct radeon_fence *(*dma)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
struct reservation_object *resv);
u32 dma_ring_index;
/* method used for bo copy */
int (*copy)(struct radeon_device *rdev,
struct radeon_fence *(*copy)(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
unsigned num_gpu_pages,
struct radeon_fence **fence);
struct reservation_object *resv);
/* ring used for bo copies */
u32 copy_ring_index;
} copy;
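
The copy callbacks now return the fence for the copy (or an ERR_PTR) instead of filling a radeon_fence ** out-parameter, and take the reservation object to synchronize against. A caller-side sketch through the radeon_copy() macro defined further down (the offsets, page count and resv are placeholders; radeon_fence_wait() is declared elsewhere in this header):

	struct radeon_fence *fence;
	int r;

	fence = radeon_copy(rdev, src_offset, dst_offset, num_gpu_pages, resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	r = radeon_fence_wait(fence, false);	/* non-interruptible wait */
	radeon_fence_unref(&fence);
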
2291,6 → 2334,7
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
unsigned fence_context;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
2309,7 → 2353,7
bool need_dma32;
bool accel_working;
bool fastfb_working; /* IGP feature*/
bool needs_reset;
bool needs_reset, in_reset;
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
const struct firmware *me_fw; /* all family ME firmware */
const struct firmware *pfp_fw; /* r6/700 PFP firmware */
2330,7 → 2374,6
struct radeon_mec mec;
struct work_struct hotplug_work;
struct work_struct audio_work;
struct work_struct reset_work;
int num_crtc; /* number of crtcs */
struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
bool has_uvd;
2355,6 → 2398,8
struct radeon_atcs atcs;
/* srbm instance registers */
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
/* clock, powergating flags */
u32 cg_flags;
u32 pg_flags;
2366,6 → 2411,7
/* tracking pinned memory */
u64 vram_pin_size;
u64 gart_pin_size;
struct mutex mn_lock;
};
 
bool radeon_is_px(struct drm_device *dev);
2421,8 → 2467,18
/*
* Cast helper
*/
#define to_radeon_fence(p) ((struct radeon_fence *)(p))
extern const struct fence_ops radeon_fence_ops;
 
static inline struct radeon_fence *to_radeon_fence(struct fence *f)
{
struct radeon_fence *__f = container_of(f, struct radeon_fence, base);
 
if (__f->base.ops == &radeon_fence_ops)
return __f;
 
return NULL;
}
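
With the checked container_of() helper replacing the old unchecked cast, callers can now tell native radeon fences from foreign ones; roughly (a sketch, with sync and r assumed from the surrounding function):

	struct radeon_fence *rfence = to_radeon_fence(f);

	if (rfence)
		radeon_sync_fence(sync, rfence);	/* native: track per ring */
	else
		r = fence_wait(f, true);		/* foreign fence: just wait on it */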
 
/*
* Registers read & write functions.
*/
2741,18 → 2797,25
/*
* RING helpers.
*/
#if DRM_DEBUG_CODE == 0
 
/**
* radeon_ring_write - write a value to the ring
*
* @ring: radeon_ring structure holding ring information
* @v: dword (dw) value to write
*
* Write a value to the requested ring buffer (all asics).
*/
static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
if (ring->count_dw <= 0)
DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
 
ring->ring[ring->wptr++] = v;
ring->wptr &= ring->ptr_mask;
ring->count_dw--;
ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
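
Typical usage of radeon_ring_write() (a sketch; radeon_ring_lock(), radeon_ring_unlock_commit() and PACKET0() are declared elsewhere in the driver, and SOME_REG/value are placeholders):

	r = radeon_ring_lock(rdev, ring, 2);		/* reserve 2 dwords */
	if (r)
		return r;
	radeon_ring_write(ring, PACKET0(SOME_REG, 0));
	radeon_ring_write(ring, value);
	radeon_ring_unlock_commit(rdev, ring, false);	/* false: no HDP flush */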
 
/*
* ASICs macro.
2778,7 → 2841,7
#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm))
#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
2791,9 → 2854,9
#define radeon_hdmi_setmode(rdev, e, m) (rdev)->asic->display.hdmi_setmode((e), (m))
#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)]->emit_fence((rdev), (fence))
#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)]->emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (f))
#define radeon_copy_blit(rdev, s, d, np, resv) (rdev)->asic->copy.blit((rdev), (s), (d), (np), (resv))
#define radeon_copy_dma(rdev, s, d, np, resv) (rdev)->asic->copy.dma((rdev), (s), (d), (np), (resv))
#define radeon_copy(rdev, s, d, np, resv) (rdev)->asic->copy.copy((rdev), (s), (d), (np), (resv))
#define radeon_copy_blit_ring_index(rdev) (rdev)->asic->copy.blit_ring_index
#define radeon_copy_dma_ring_index(rdev) (rdev)->asic->copy.dma_ring_index
#define radeon_copy_ring_index(rdev) (rdev)->asic->copy.copy_ring_index
2867,6 → 2930,10
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
extern int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
extern bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm);
extern bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
2883,7 → 2950,7
void radeon_vm_manager_fini(struct radeon_device *rdev);
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
struct radeon_vm *vm,
struct list_head *head);
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
2890,7 → 2957,7
struct radeon_vm *vm, int ring);
void radeon_vm_flush(struct radeon_device *rdev,
struct radeon_vm *vm,
int ring);
int ring, struct radeon_fence *fence);
void radeon_vm_fence(struct radeon_device *rdev,
struct radeon_vm *vm,
struct radeon_fence *fence);
2924,10 → 2991,10
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
void r600_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable);
u8 enable_mask);
void dce6_audio_enable(struct radeon_device *rdev,
struct r600_audio_pin *pin,
bool enable);
u8 enable_mask);
 
/*
* R600 vram scratch functions
2997,7 → 3064,7
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt);
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc,
struct radeon_bo_list **cs_reloc,
int nomm);
int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
uint32_t *vline_start_end,
3005,7 → 3072,7
 
#include "radeon_object.h"
 
#define DRM_UDELAY(d) udelay(d)
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159
 
resource_size_t
drm_get_resource_start(struct drm_device *dev, unsigned int resource);