Subversion Repositories Kolibri OS

Rev

Rev 6937 | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 6937 Rev 7144
1
/*
1
/*
2
 * Copyright © 2014 Intel Corporation
2
 * Copyright © 2014 Intel Corporation
3
 *
3
 *
4
 * Permission is hereby granted, free of charge, to any person obtaining a
4
 * Permission is hereby granted, free of charge, to any person obtaining a
5
 * copy of this software and associated documentation files (the "Software"),
5
 * copy of this software and associated documentation files (the "Software"),
6
 * to deal in the Software without restriction, including without limitation
6
 * to deal in the Software without restriction, including without limitation
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
7
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8
 * and/or sell copies of the Software, and to permit persons to whom the
8
 * and/or sell copies of the Software, and to permit persons to whom the
9
 * Software is furnished to do so, subject to the following conditions:
9
 * Software is furnished to do so, subject to the following conditions:
10
 *
10
 *
11
 * The above copyright notice and this permission notice (including the next
11
 * The above copyright notice and this permission notice (including the next
12
 * paragraph) shall be included in all copies or substantial portions of the
12
 * paragraph) shall be included in all copies or substantial portions of the
13
 * Software.
13
 * Software.
14
 *
14
 *
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21
 * DEALINGS IN THE SOFTWARE.
21
 * DEALINGS IN THE SOFTWARE.
22
 */
22
 */
23
 
23
 
24
#ifndef _INTEL_LRC_H_
24
#ifndef _INTEL_LRC_H_
25
#define _INTEL_LRC_H_
25
#define _INTEL_LRC_H_
26
 
26
 
27
#define GEN8_LR_CONTEXT_ALIGN 4096
27
#define GEN8_LR_CONTEXT_ALIGN 4096
28
#define GEN8_CSB_ENTRIES 6
-
 
29
#define GEN8_CSB_PTR_MASK 0x07
-
 
30
 
28
 
31
/* Execlists regs */
29
/* Execlists regs */
32
#define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
30
#define RING_ELSP(ring)				_MMIO((ring)->mmio_base + 0x230)
33
#define RING_EXECLIST_STATUS_LO(ring)		_MMIO((ring)->mmio_base + 0x234)
31
#define RING_EXECLIST_STATUS_LO(ring)		_MMIO((ring)->mmio_base + 0x234)
34
#define RING_EXECLIST_STATUS_HI(ring)		_MMIO((ring)->mmio_base + 0x234 + 4)
32
#define RING_EXECLIST_STATUS_HI(ring)		_MMIO((ring)->mmio_base + 0x234 + 4)
35
#define RING_CONTEXT_CONTROL(ring)		_MMIO((ring)->mmio_base + 0x244)
33
#define RING_CONTEXT_CONTROL(ring)		_MMIO((ring)->mmio_base + 0x244)
36
#define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
34
#define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	(1 << 3)
37
#define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
35
#define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	(1 << 0)
38
#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
36
#define   CTX_CTRL_RS_CTX_ENABLE                (1 << 1)
39
#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8)
37
#define RING_CONTEXT_STATUS_BUF_LO(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8)
40
#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
38
#define RING_CONTEXT_STATUS_BUF_HI(ring, i)	_MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
41
#define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)
39
#define RING_CONTEXT_STATUS_PTR(ring)		_MMIO((ring)->mmio_base + 0x3a0)
-
 
40
 
-
 
41
/* The docs specify that the write pointer wraps around after 5h, "After status
-
 
42
 * is written out to the last available status QW at offset 5h, this pointer
-
 
43
 * wraps to 0."
-
 
44
 *
-
 
45
 * Therefore, one must infer than even though there are 3 bits available, 6 and
-
 
46
 * 7 appear to be reserved.
-
 
47
 */
-
 
48
#define GEN8_CSB_ENTRIES 6
-
 
49
#define GEN8_CSB_PTR_MASK 0x7
-
 
50
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
-
 
51
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
-
 
52
#define GEN8_CSB_WRITE_PTR(csb_status) \
-
 
53
	(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
-
 
54
#define GEN8_CSB_READ_PTR(csb_status) \
-
 
55
	(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
42
 
56
 
43
/* Logical Rings */
57
/* Logical Rings */
44
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
58
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
45
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
59
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
46
void intel_logical_ring_stop(struct intel_engine_cs *ring);
60
void intel_logical_ring_stop(struct intel_engine_cs *ring);
47
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
61
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
48
int intel_logical_rings_init(struct drm_device *dev);
62
int intel_logical_rings_init(struct drm_device *dev);
49
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
63
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);
50
 
64
 
51
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
65
int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
52
/**
66
/**
53
 * intel_logical_ring_advance() - advance the ringbuffer tail
67
 * intel_logical_ring_advance() - advance the ringbuffer tail
54
 * @ringbuf: Ringbuffer to advance.
68
 * @ringbuf: Ringbuffer to advance.
55
 *
69
 *
56
 * The tail is only updated in our logical ringbuffer struct.
70
 * The tail is only updated in our logical ringbuffer struct.
57
 */
71
 */
58
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
72
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
59
{
73
{
60
	ringbuf->tail &= ringbuf->size - 1;
74
	ringbuf->tail &= ringbuf->size - 1;
61
}
75
}
62
/**
76
/**
63
 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
77
 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
64
 * @ringbuf: Ringbuffer to write to.
78
 * @ringbuf: Ringbuffer to write to.
65
 * @data: DWORD to write.
79
 * @data: DWORD to write.
66
 */
80
 */
67
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
81
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
68
					   u32 data)
82
					   u32 data)
69
{
83
{
70
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
84
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
71
	ringbuf->tail += 4;
85
	ringbuf->tail += 4;
72
}
86
}
73
static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
87
static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
74
					       i915_reg_t reg)
88
					       i915_reg_t reg)
75
{
89
{
76
	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
90
	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
77
}
91
}
78
 
92
 
79
/* Logical Ring Contexts */
93
/* Logical Ring Contexts */
80
 
94
 
81
/* One extra page is added before LRC for GuC as shared data */
95
/* One extra page is added before LRC for GuC as shared data */
82
#define LRC_GUCSHR_PN	(0)
96
#define LRC_GUCSHR_PN	(0)
83
#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)
97
#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)
84
#define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
98
#define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
85
 
99
 
86
void intel_lr_context_free(struct intel_context *ctx);
100
void intel_lr_context_free(struct intel_context *ctx);
-
 
101
uint32_t intel_lr_context_size(struct intel_engine_cs *ring);
87
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
102
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
88
				    struct intel_engine_cs *ring);
103
				    struct intel_engine_cs *ring);
89
void intel_lr_context_unpin(struct drm_i915_gem_request *req);
104
void intel_lr_context_unpin(struct intel_context *ctx,
-
 
105
			    struct intel_engine_cs *engine);
90
void intel_lr_context_reset(struct drm_device *dev,
106
void intel_lr_context_reset(struct drm_device *dev,
91
			struct intel_context *ctx);
107
			struct intel_context *ctx);
92
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
108
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
93
				     struct intel_engine_cs *ring);
109
				     struct intel_engine_cs *ring);
-
 
110
 
-
 
111
u32 intel_execlists_ctx_id(struct intel_context *ctx,
-
 
112
			   struct intel_engine_cs *ring);
94
 
113
 
95
/* Execlists */
114
/* Execlists */
96
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
115
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
97
struct i915_execbuffer_params;
116
struct i915_execbuffer_params;
98
int intel_execlists_submission(struct i915_execbuffer_params *params,
117
int intel_execlists_submission(struct i915_execbuffer_params *params,
99
			       struct drm_i915_gem_execbuffer2 *args,
118
			       struct drm_i915_gem_execbuffer2 *args,
100
			       struct list_head *vmas);
119
			       struct list_head *vmas);
101
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);
-
 
102
 
120
 
103
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
121
void intel_lrc_irq_handler(struct intel_engine_cs *ring);
104
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
122
void intel_execlists_retire_requests(struct intel_engine_cs *ring);
105
 
123
 
106
#endif /* _INTEL_LRC_H_ */
124
#endif /* _INTEL_LRC_H_ */