33,10 → 33,21 |
/* Accessors for a ring's interrupt mask register (IMR), addressed
 * relative to the ring's mmio_base.
 */
#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

/* Read accessors for a ring's NOPID register and the two semaphore
 * sync registers, likewise addressed via mmio_base.
 */
#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
/*
 * Hangcheck verdicts for a ring.
 *
 * NOTE(review): the ordering looks like escalating severity
 * (wait -> active -> kick -> hung) — confirm against the hangcheck
 * code that produces and consumes these values.
 */
enum intel_ring_hangcheck_action {
	HANGCHECK_WAIT,
	HANGCHECK_ACTIVE,
	HANGCHECK_KICK,
	HANGCHECK_HUNG,
};
|
/*
 * Per-ring state tracked by the hangcheck logic.
 */
struct intel_ring_hangcheck {
	bool deadlock;		/* NOTE(review): presumably flags a detected
				 * semaphore deadlock — confirm at the setter */
	u32 seqno;		/* last sampled seqno — TODO confirm used to
				 * detect forward progress between checks */
	u32 acthd;		/* last sampled active head (ACTHD) value */
	int score;		/* accumulated hang score — presumably raised
				 * on no-progress, verify in hangcheck timer */
	enum intel_ring_hangcheck_action action;	/* last verdict */
};
|
struct intel_ring_buffer { |
const char *name; |
enum intel_ring_id { |
43,8 → 54,9 |
		RCS = 0x0,	/* render ring (cf. intel_init_render_ring_buffer) */
		VCS,		/* video/BSD ring (cf. intel_init_bsd_ring_buffer) */
		BCS,		/* blitter ring (cf. intel_init_blt_ring_buffer) */
		VECS,		/* video enhancement ring (cf. intel_init_vebox_ring_buffer) */
	} id;
#define I915_NUM_RINGS 3
#define I915_NUM_RINGS 4
	u32 mmio_base;		/* base of this ring's register block */
	void __iomem *virtual_start;	/* CPU mapping of the ring buffer */
	struct drm_device *dev;
67,7 → 79,7 |
*/ |
u32 last_retired_head; |
|
u32 irq_refcount; /* protected by dev_priv->irq_lock */ |
unsigned irq_refcount; /* protected by dev_priv->irq_lock */ |
u32 irq_enable_mask; /* bitmask to enable ring interrupt */ |
u32 trace_irq_seqno; |
u32 sync_seqno[I915_NUM_RINGS-1]; |
102,8 → 114,11 |
struct intel_ring_buffer *to, |
u32 seqno); |
|
u32 semaphore_register[3]; /*our mbox written by others */ |
u32 signal_mbox[2]; /* mboxes this ring signals to */ |
/* our mbox written by others */ |
u32 semaphore_register[I915_NUM_RINGS]; |
/* mboxes this ring signals to */ |
u32 signal_mbox[I915_NUM_RINGS]; |
|
/** |
* List of objects currently involved in rendering from the |
* ringbuffer. |
127,6 → 142,7 |
*/ |
u32 outstanding_lazy_request; |
bool gpu_caches_dirty; |
bool fbc_dirty; |
|
wait_queue_head_t irq_queue; |
|
135,9 → 151,15 |
*/ |
	bool itlb_before_ctx_switch;	/* NOTE(review): presumably forces an
					 * ITLB invalidate before context
					 * switches — confirm at use site */
	struct i915_hw_context *default_context;
	struct drm_i915_gem_object *last_context_obj;
	struct i915_hw_context *last_context;	/* most recent context run on
						 * this ring — TODO confirm */

	void *private;			/* ring-implementation private data */
	struct intel_ring_hangcheck hangcheck;	/* see struct above */

	/* Scratch page used by the ring; gtt_offset and cpu_page are the
	 * GTT and CPU views of obj — TODO confirm against the allocator.
	 */
	struct {
		struct drm_i915_gem_object *obj;
		u32 gtt_offset;
		volatile u32 *cpu_page;
	} scratch;
};
|
static inline bool |
224,6 → 246,7 |
/* Per-engine ring initialisers — one per enum intel_ring_id value. */
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
int intel_init_vebox_ring_buffer(struct drm_device *dev);

/* Read the ring's active head (ACTHD). */
u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
/* (Re)program the ring's hardware status page. */
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);