Subversion Repositories Kolibri OS

Compare Revisions

Rev 5353 → Rev 5354

/drivers/video/drm/i915/intel_ringbuffer.h
5,6 → 5,13
 
#define I915_CMD_HASH_ORDER 9
 
/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill,
 * but keeps the logic simple. Indeed, the whole purpose of this macro is just
 * to give some inclination as to some of the magic values used in the various
 * workarounds!
 */
#define CACHELINE_BYTES 64
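
The comment above is the only hint in this header about where CACHELINE_BYTES ends up: workaround and scratch-write code tends to space offsets some multiple of a cacheline apart. As a purely illustrative sketch (the helper below is hypothetical and not part of i915), rounding an offset up to the next cacheline boundary would look like this:

/* Hypothetical helper, for illustration only: round an offset up to the
 * next cacheline boundary. Assumes CACHELINE_BYTES is a power of two. */
static inline u32 cacheline_align_up(u32 offset)
{
        return (offset + CACHELINE_BYTES - 1) & ~(CACHELINE_BYTES - 1);
}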
 
/*
 * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
 * Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
90,6 → 97,15
struct drm_i915_gem_object *obj;
void __iomem *virtual_start;
 
struct intel_engine_cs *ring;
 
/*
 * FIXME: This backpointer is an artifact of the history of how the
 * execlist patches came into being. It will get removed once the basic
 * code has landed.
 */
struct intel_context *FIXME_lrc_ctx;
 
u32 head;
u32 tail;
int space;
132,6 → 148,9
 
int (*init)(struct intel_engine_cs *ring);
 
int (*init_context)(struct intel_engine_cs *ring,
                    struct intel_context *ctx);
 
void (*write_tail)(struct intel_engine_cs *ring,
                   u32 value);
int __must_check (*flush)(struct intel_engine_cs *ring,
214,6 → 233,19
unsigned int num_dwords);
} semaphore;
 
/* Execlists */
spinlock_t execlist_lock;
struct list_head execlist_queue;
struct list_head execlist_retired_req_list;
u8 next_context_status_buffer;
u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
int (*emit_request)(struct intel_ringbuffer *ringbuf);
int (*emit_flush)(struct intel_ringbuffer *ringbuf,
                  u32 invalidate_domains,
                  u32 flush_domains);
int (*emit_bb_start)(struct intel_ringbuffer *ringbuf,
                     u64 offset, unsigned flags);
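
To see how the three new emit hooks are meant to compose, a submission path would roughly invalidate caches, start the batch, then emit the request. The function below is a hypothetical sketch for orientation only; its name and the choice of flush domains are assumptions, not code from this change.

/* Hypothetical sketch: drive a batch through the new execlists hooks. */
static int submit_batch_sketch(struct intel_engine_cs *ring,
                               struct intel_ringbuffer *ringbuf,
                               u64 batch_offset, unsigned flags)
{
        int ret;

        /* Invalidate caches before the batch executes. */
        ret = ring->emit_flush(ringbuf, I915_GEM_GPU_DOMAINS, 0);
        if (ret)
                return ret;

        /* Kick off the batch buffer at the given offset. */
        ret = ring->emit_bb_start(ringbuf, batch_offset, flags);
        if (ret)
                return ret;

        /* Emit the request/seqno so completion can be tracked. */
        return ring->emit_request(ringbuf);
}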
 
/**
 * List of objects currently involved in rendering from the
 * ringbuffer.
287,11 → 319,7
u32 (*get_cmd_length_mask)(u32 cmd_header);
};
 
static inline bool
intel_ring_initialized(struct intel_engine_cs *ring)
{
return ring->buffer && ring->buffer->obj;
}
bool intel_ring_initialized(struct intel_engine_cs *ring);
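
The initialized check is no longer a static inline here, presumably because with execlists the ringbuffer is reached through the context rather than through ring->buffer, which would drag extra definitions into this header. Below is a rough sketch of what the out-of-line version could look like; the execlists-side names used (i915.enable_execlists, default_context, engine[ring->id].ringbuf) are assumptions for illustration, not taken from this diff.

/* Sketch only -- the real definition would live in intel_ringbuffer.c. */
bool intel_ring_initialized(struct intel_engine_cs *ring)
{
        struct drm_device *dev = ring->dev;

        if (!dev)
                return false;

        if (i915.enable_execlists) {
                /* Assumed layout: in LRC mode the ringbuffer hangs off the
                 * engine's default context instead of ring->buffer. */
                struct intel_context *dctx = ring->default_context;
                struct intel_ringbuffer *ringbuf = dctx->engine[ring->id].ringbuf;

                return ringbuf->obj;
        } else
                return ring->buffer && ring->buffer->obj;
}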
 
static inline unsigned
intel_ring_flag(struct intel_engine_cs *ring)
355,6 → 383,13
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
                                     struct intel_ringbuffer *ringbuf);
void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
int intel_alloc_ringbuffer_obj(struct drm_device *dev,
                               struct intel_ringbuffer *ringbuf);
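
The names of the new helpers suggest a simple pairing: allocate then pin-and-map before use, unpin then destroy on teardown. A minimal, hypothetical usage sketch (the wrapper function name is made up):

/* Hypothetical usage of the new ringbuffer object helpers. */
static int ringbuffer_setup_sketch(struct drm_device *dev,
                                   struct intel_ringbuffer *ringbuf)
{
        int ret;

        ret = intel_alloc_ringbuffer_obj(dev, ringbuf);  /* backing GEM object */
        if (ret)
                return ret;

        ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);  /* pin + map */
        if (ret) {
                intel_destroy_ringbuffer_obj(ringbuf);
                return ret;
        }
        return 0;
}

Teardown would presumably mirror this: intel_unpin_ringbuffer_obj() followed by intel_destroy_ringbuffer_obj().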
 
void intel_stop_ring_buffer(struct intel_engine_cs *ring);
void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
372,6 → 407,9
struct intel_ringbuffer *ringbuf = ring->buffer;
ringbuf->tail &= ringbuf->size - 1;
}
int __intel_ring_space(int head, int tail, int size);
int intel_ring_space(struct intel_ringbuffer *ringbuf);
bool intel_ring_stopped(struct intel_engine_cs *ring);
void __intel_ring_advance(struct intel_engine_cs *ring);
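
For orientation, __intel_ring_space() is the usual circular-buffer arithmetic: the free space is the distance from tail back around to head, minus a small reserve so the two pointers never meet. A sketch of the calculation the declaration implies (using I915_RING_FREE_SPACE as the reserve is an assumption here):

/* Sketch of the circular free-space calculation behind __intel_ring_space(). */
int __intel_ring_space(int head, int tail, int size)
{
        int space = head - (tail + I915_RING_FREE_SPACE);

        if (space < 0)
                space += size;  /* tail is ahead of head: wrap around */
        return space;
}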
 
int __must_check intel_ring_idle(struct intel_engine_cs *ring);
379,6 → 417,9
int intel_ring_flush_all_caches(struct intel_engine_cs *ring);
int intel_ring_invalidate_all_caches(struct intel_engine_cs *ring);
 
void intel_fini_pipe_control(struct intel_engine_cs *ring);
int intel_init_pipe_control(struct intel_engine_cs *ring);
 
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_bsd2_ring_buffer(struct drm_device *dev);
388,6 → 429,8
u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
void intel_ring_setup_status_page(struct intel_engine_cs *ring);
 
int init_workarounds_ring(struct intel_engine_cs *ring);
 
static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
{
return ringbuf->tail;
405,7 → 448,4
ring->trace_irq_seqno = seqno;
}
 
/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
 
#endif /* _INTEL_RINGBUFFER_H_ */