Subversion Repositories — KolibriOS

kgem.h: comparison of Rev 4304 with Rev 4315. The only difference between the two
revisions is in kgem_set_mode() and _kgem_set_mode(), where the previously
commented-out kgem->context_switch(kgem, mode) calls are re-enabled; the file
below reflects Rev 4315.
/*
 * Copyright (c) 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#ifndef KGEM_H
#define KGEM_H

#include <stdint.h>
#include <stdbool.h>
#include <stdarg.h>
#include <stdio.h>

#include <i915_drm.h>

#include "compiler.h"
#include "intel_list.h"

#include <limits.h>
#if !defined(MAXSHORT) || !defined(MINSHORT) || \
    !defined(MAXINT) || !defined(MININT)
/*
 * Some implementations #define these through <math.h>, so preclude
 * #include'ing it later.
 */

#include <assert.h>
#undef MAXSHORT
#define MAXSHORT SHRT_MAX
#undef MINSHORT
#define MINSHORT SHRT_MIN
#undef MAXINT
#define MAXINT INT_MAX
#undef MININT
#define MININT INT_MIN

#endif

struct kgem_bo {
	struct kgem_request *rq;
#define RQ(rq) ((struct kgem_request *)((uintptr_t)(rq) & ~3))
#define RQ_RING(rq) ((uintptr_t)(rq) & 3)
#define RQ_IS_BLT(rq) (RQ_RING(rq) == KGEM_BLT)
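/*
 * The low two bits of the rq pointer are used as a tag for the ring the
 * request was submitted on (see kgem_bo_mark_busy() below, which ORs the
 * ring index into the pointer).  RQ() masks the tag off before the pointer
 * is dereferenced and RQ_RING() extracts it; this relies on kgem_request
 * allocations being at least 4-byte aligned.
 */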
	struct drm_i915_gem_exec_object2 *exec;

	struct kgem_bo *proxy;

	struct list list;
	struct list request;
	struct list vma;

	void *map;
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1)
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0)
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
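/*
 * bo->map uses the same pointer-tagging trick: bit 0 set means the cached
 * mapping is a CPU mmap, bit 0 clear (with a non-NULL pointer) means a GTT
 * mmap.  MAP() strips the tag bits to recover the usable address.
 */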

	struct kgem_bo_binding {
		struct kgem_bo_binding *next;
		uint32_t format;
		uint16_t offset;
	} binding;

	uint32_t unique_id;
	uint32_t refcnt;
	uint32_t handle;
	uint32_t target_handle;
	uint32_t presumed_offset;
	uint32_t delta;
	union {
		struct {
			uint32_t count:27;
#define PAGE_SIZE 4096
			uint32_t bucket:5;
#define NUM_CACHE_BUCKETS 16
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12))
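/*
 * An object's size is kept either as a page count (for real GEM objects,
 * see __kgem_bo_size() below) or as a byte count (for proxy/upload buffers,
 * see __kgem_buffer_size()).  "bucket" indexes the bo cache lists; with
 * NUM_CACHE_BUCKETS == 16 and 4 KiB pages the largest cacheable object is
 * MAX_CACHE_SIZE = 1 << (16 + 12) bytes, i.e. 256 MiB.
 */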
		} pages;
		uint32_t bytes;
	} size;
	uint32_t pitch : 18; /* max 128k */
	uint32_t tiling : 2;
	uint32_t reusable : 1;
	uint32_t gpu_dirty : 1;
	uint32_t gtt_dirty : 1;
	uint32_t domain : 2;
	uint32_t needs_flush : 1;
	uint32_t snoop : 1;
	uint32_t io : 1;
	uint32_t flush : 1;
	uint32_t scanout : 1;
	uint32_t purged : 1;
};
#define DOMAIN_NONE 0
#define DOMAIN_CPU 1
#define DOMAIN_GTT 2
#define DOMAIN_GPU 3
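/*
 * bo->domain tracks where the object was last accessed: DOMAIN_CPU for
 * cacheable CPU mappings, DOMAIN_GTT for write-combining GTT mappings,
 * DOMAIN_GPU while the GPU may still be writing to it, and DOMAIN_NONE once
 * no further synchronisation is required (see __kgem_bo_clear_busy()).
 */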

struct kgem_request {
	struct list list;
	struct kgem_bo *bo;
	struct list buffers;
	int ring;
};

enum {
	MAP_GTT = 0,
	MAP_CPU,
	NUM_MAP_TYPES,
};

struct kgem {
	int fd;
	int wedged;
	unsigned gen;

	uint32_t unique_id;

	enum kgem_mode {
		/* order matches I915_EXEC_RING ordering */
		KGEM_NONE = 0,
		KGEM_RENDER,
		KGEM_BSD,
		KGEM_BLT,
	} mode, ring;

	struct list flushing;
	struct list large;
	struct list large_inactive;
	struct list active[NUM_CACHE_BUCKETS][3];
	struct list inactive[NUM_CACHE_BUCKETS];
	struct list pinned_batches[2];
	struct list snoop;
	struct list scanout;
	struct list batch_buffers, active_buffers;

	struct list requests[2];
	struct kgem_request *next_request;
	struct kgem_request static_request;

	struct {
		struct list inactive[NUM_CACHE_BUCKETS];
		int16_t count;
	} vma[NUM_MAP_TYPES];

	uint32_t batch_flags;
	uint32_t batch_flags_base;
#define I915_EXEC_SECURE (1<<9)
#define LOCAL_EXEC_OBJECT_WRITE (1<<2)

	uint16_t nbatch;
	uint16_t surface;
	uint16_t nexec;
	uint16_t nreloc;
	uint16_t nreloc__self;
	uint16_t nfence;
	uint16_t batch_size;
	uint16_t min_alignment;

	uint32_t flush:1;
	uint32_t need_expire:1;
	uint32_t need_purge:1;
	uint32_t need_retire:1;
	uint32_t need_throttle:1;
	uint32_t scanout_busy:1;
	uint32_t busy:1;

	uint32_t has_create2 :1;
	uint32_t has_userptr :1;
	uint32_t has_blt :1;
	uint32_t has_relaxed_fencing :1;
	uint32_t has_relaxed_delta :1;
	uint32_t has_semaphores :1;
	uint32_t has_secure_batches :1;
	uint32_t has_pinned_batches :1;
	uint32_t has_caching :1;
	uint32_t has_llc :1;
	uint32_t has_wt :1;
	uint32_t has_no_reloc :1;
	uint32_t has_handle_lut :1;

	uint32_t can_blt_cpu :1;

	uint16_t fence_max;
	uint16_t half_cpu_cache_pages;
	uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable;
	uint32_t aperture, aperture_fenced;
	uint32_t max_upload_tile_size, max_copy_tile_size;
	uint32_t max_gpu_size, max_cpu_size;
	uint32_t large_object_size, max_object_size;
	uint32_t buffer_size;

	void (*context_switch)(struct kgem *kgem, int new_mode);
	void (*retire)(struct kgem *kgem);
	void (*expire)(struct kgem *kgem);

#if 0
	void (*memcpy_to_tiled_x)(const void *src, void *dst, int bpp,
				  int32_t src_stride, int32_t dst_stride,
				  int16_t src_x, int16_t src_y,
				  int16_t dst_x, int16_t dst_y,
				  uint16_t width, uint16_t height);
	void (*memcpy_from_tiled_x)(const void *src, void *dst, int bpp,
				    int32_t src_stride, int32_t dst_stride,
				    int16_t src_x, int16_t src_y,
				    int16_t dst_x, int16_t dst_y,
				    uint16_t width, uint16_t height);
#endif

	uint16_t reloc__self[256];
	uint32_t batch[64*1024-8] page_aligned;
	struct drm_i915_gem_exec_object2 exec[384] page_aligned;
	struct drm_i915_gem_relocation_entry reloc[8192] page_aligned;

#ifdef DEBUG_MEMORY
	struct {
		int bo_allocs;
		size_t bo_bytes;
	} debug_memory;
#endif
};

#define KGEM_MAX_DEFERRED_VBO 16

#define KGEM_BATCH_RESERVED 1
#define KGEM_RELOC_RESERVED (KGEM_MAX_DEFERRED_VBO)
#define KGEM_EXEC_RESERVED (1+KGEM_MAX_DEFERRED_VBO)

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
#endif

#define KGEM_BATCH_SIZE(K) ((K)->batch_size-KGEM_BATCH_RESERVED)
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED)
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED)

void kgem_init(struct kgem *kgem, int fd, struct pci_device *dev, unsigned gen);
void kgem_reset(struct kgem *kgem);

struct kgem_bo *kgem_create_map(struct kgem *kgem,
				void *ptr, uint32_t size,
				bool read_only);

struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name);
struct kgem_bo *kgem_create_for_prime(struct kgem *kgem, int name, uint32_t size);
int kgem_bo_export_to_prime(struct kgem *kgem, struct kgem_bo *bo);

struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size, unsigned flags);
struct kgem_bo *kgem_create_proxy(struct kgem *kgem,
				  struct kgem_bo *target,
				  int offset, int length);

void kgem_proxy_bo_attach(struct kgem_bo *bo, struct kgem_bo **ptr);

int kgem_choose_tiling(struct kgem *kgem,
		       int tiling, int width, int height, int bpp);
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth);
#define KGEM_CAN_CREATE_GPU     0x1
#define KGEM_CAN_CREATE_CPU     0x2
#define KGEM_CAN_CREATE_LARGE   0x4
#define KGEM_CAN_CREATE_GTT     0x8

uint32_t kgem_get_unique_id(struct kgem *kgem);

struct kgem_bo *
kgem_replace_bo(struct kgem *kgem,
		struct kgem_bo *src,
		uint32_t width,
		uint32_t height,
		uint32_t pitch,
		uint32_t bpp);
enum {
	CREATE_EXACT = 0x1,
	CREATE_INACTIVE = 0x2,
	CREATE_CPU_MAP = 0x4,
	CREATE_GTT_MAP = 0x8,
	CREATE_SCANOUT = 0x10,
	CREATE_PRIME = 0x20,
	CREATE_TEMPORARY = 0x40,
	CREATE_CACHED = 0x80,
	CREATE_NO_RETIRE = 0x100,
	CREATE_NO_THROTTLE = 0x200,
};
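/*
 * These flags steer how kgem_create_2d()/kgem_create_cpu_2d() search for and
 * place an object: roughly, CREATE_EXACT demands the exact size rather than a
 * cached bucket, CREATE_INACTIVE prefers reusing a buffer from the inactive
 * cache, CREATE_CPU_MAP/CREATE_GTT_MAP request a placement that can be
 * mapped the corresponding way, CREATE_SCANOUT allocates a buffer suitable
 * for display, and CREATE_NO_RETIRE/CREATE_NO_THROTTLE skip the matching
 * housekeeping while searching.
 */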
struct kgem_bo *kgem_create_2d(struct kgem *kgem,
			       int width,
			       int height,
			       int bpp,
			       int tiling,
			       uint32_t flags);
struct kgem_bo *kgem_create_cpu_2d(struct kgem *kgem,
				   int width,
				   int height,
				   int bpp,
				   uint32_t flags);

uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format);
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset);

bool kgem_retire(struct kgem *kgem);

bool __kgem_ring_is_idle(struct kgem *kgem, int ring);
static inline bool kgem_ring_is_idle(struct kgem *kgem, int ring)
{
	ring = ring == KGEM_BLT;

	if (list_is_empty(&kgem->requests[ring]))
		return true;

	return __kgem_ring_is_idle(kgem, ring);
}
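/*
 * Only two request lists are kept: index 1 for the BLT ring and index 0 for
 * everything else, hence the "ring = ring == KGEM_BLT" folding above and the
 * matching "kgem->ring == KGEM_BLT" index used by __kgem_ring_empty() below.
 */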

static inline bool kgem_is_idle(struct kgem *kgem)
{
	if (!kgem->need_retire)
		return true;

	return kgem_ring_is_idle(kgem, kgem->ring);
}

static inline bool __kgem_ring_empty(struct kgem *kgem)
{
	return list_is_empty(&kgem->requests[kgem->ring == KGEM_BLT]);
}

void _kgem_submit(struct kgem *kgem);
static inline void kgem_submit(struct kgem *kgem)
{
	if (kgem->nbatch)
		_kgem_submit(kgem);
}

static inline bool kgem_flush(struct kgem *kgem, bool flush)
{
	if (kgem->nreloc == 0)
		return false;

	return (kgem->flush ^ flush) && kgem_ring_is_idle(kgem, kgem->ring);
}

static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo)
{
	if (bo->exec)
		_kgem_submit(kgem);
}

void kgem_scanout_flush(struct kgem *kgem, struct kgem_bo *bo);

static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	bo->refcnt++;
	return bo;
}

void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo)
{
	assert(bo->refcnt);
	if (--bo->refcnt == 0)
		_kgem_bo_destroy(kgem, bo);
}
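/*
 * Illustrative sketch of the reference-counting convention (the names below
 * are hypothetical, not part of this header): a caller that stashes a bo
 * takes its own reference and drops it when done,
 *
 *	struct kgem_bo *priv_bo = kgem_bo_reference(bo);
 *	...
 *	kgem_bo_destroy(kgem, priv_bo);
 *
 * kgem_bo_destroy() only releases (or returns to the cache) the object once
 * the last reference is gone.
 */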

void kgem_clear_dirty(struct kgem *kgem);

static inline void kgem_set_mode(struct kgem *kgem,
				 enum kgem_mode mode,
				 struct kgem_bo *bo)
{
	assert(!kgem->wedged);

#if DEBUG_FLUSH_BATCH
	kgem_submit(kgem);
#endif

	if (kgem->nreloc && bo->exec == NULL && kgem_ring_is_idle(kgem, kgem->ring))
		_kgem_submit(kgem);

	if (kgem->mode == mode)
		return;

	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode)
{
	assert(kgem->mode == KGEM_NONE);
	assert(kgem->nbatch == 0);
	assert(!kgem->wedged);
	kgem->context_switch(kgem, mode);
	kgem->mode = mode;
}

static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords)
{
	assert(num_dwords > 0);
	assert(kgem->nbatch < kgem->surface);
	assert(kgem->surface <= kgem->batch_size);
	return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface);
}

static inline bool kgem_check_reloc(struct kgem *kgem, int n)
{
	assert(kgem->nreloc <= KGEM_RELOC_SIZE(kgem));
	return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem));
}

static inline bool kgem_check_exec(struct kgem *kgem, int n)
{
	assert(kgem->nexec <= KGEM_EXEC_SIZE(kgem));
	return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem));
}

static inline bool kgem_check_reloc_and_exec(struct kgem *kgem, int n)
{
	return kgem_check_reloc(kgem, n) && kgem_check_exec(kgem, n);
}

static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem,
						  int num_dwords,
						  int num_surfaces)
{
	return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) &&
		kgem_check_reloc(kgem, num_surfaces) &&
		kgem_check_exec(kgem, num_surfaces);
}

static inline uint32_t *kgem_get_batch(struct kgem *kgem)
{
	if (kgem->nreloc) {
		unsigned mode = kgem->mode;
		_kgem_submit(kgem);
		_kgem_set_mode(kgem, mode);
	}

	return kgem->batch + kgem->nbatch;
}
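/*
 * The batch buffer is filled from both ends: commands grow upwards from
 * batch[0] (kgem->nbatch) while surface state is allocated downwards from
 * the end of the batch (kgem->surface).  The checks above guarantee the two
 * never collide, keeping KGEM_BATCH_RESERVED dwords spare (presumably for
 * the terminating MI_BATCH_BUFFER_END) plus 8 dwords of state for each
 * requested surface.
 */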

bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0)));
bool kgem_check_bo_fenced(struct kgem *kgem, struct kgem_bo *bo);
bool kgem_check_many_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0)));

#define KGEM_RELOC_FENCED 0x8000
uint32_t kgem_add_reloc(struct kgem *kgem,
			uint32_t pos,
			struct kgem_bo *bo,
			uint32_t read_write_domains,
			uint32_t delta);
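/*
 * Illustrative sketch of emitting a command (the dword values and variable
 * names are hypothetical): reserve space, fill in the dwords, and let
 * kgem_add_reloc() record a relocation at batch position "pos" against "bo",
 * returning the value to place in the batch.  KGEM_RELOC_FENCED may be ORed
 * into read_write_domains when the access needs a fence register.
 *
 *	uint32_t *b;
 *
 *	kgem_set_mode(kgem, KGEM_BLT, dst_bo);
 *	if (!kgem_check_batch(kgem, 6) ||
 *	    !kgem_check_reloc_and_exec(kgem, 1)) {
 *		_kgem_submit(kgem);
 *		_kgem_set_mode(kgem, KGEM_BLT);
 *	}
 *
 *	b = kgem->batch + kgem->nbatch;
 *	b[0] = cmd;
 *	b[1] = br13;
 *	b[2] = 0;
 *	b[3] = height << 16 | width;
 *	b[4] = kgem_add_reloc(kgem, kgem->nbatch + 4, dst_bo,
 *			      I915_GEM_DOMAIN_RENDER << 16 |
 *			      I915_GEM_DOMAIN_RENDER |
 *			      KGEM_RELOC_FENCED,
 *			      0);
 *	b[5] = color;
 *	kgem->nbatch += 6;
 */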

void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__async(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__gtt(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__gtt(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo);
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo);
void kgem_bo_sync__cpu_full(struct kgem *kgem, struct kgem_bo *bo, bool write);
void *__kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo);
void __kgem_bo_unmap__cpu(struct kgem *kgem, struct kgem_bo *bo, void *ptr);
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo);

bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo,
		   const void *data, int length);

int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo);
void kgem_get_tile_size(struct kgem *kgem, int tiling,
			int *tile_width, int *tile_height, int *tile_size);

static inline int __kgem_buffer_size(struct kgem_bo *bo)
{
	assert(bo->proxy != NULL);
	return bo->size.bytes;
}

static inline int __kgem_bo_size(struct kgem_bo *bo)
{
	assert(bo->proxy == NULL);
	return PAGE_SIZE * bo->size.pages.count;
}

static inline int kgem_bo_size(struct kgem_bo *bo)
{
	if (bo->proxy)
		return __kgem_buffer_size(bo);
	else
		return __kgem_bo_size(bo);
}

/*
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem,
					   struct kgem_bo *bo)
{
	int pitch = bo->pitch;
	if (kgem->gen >= 040 && bo->tiling)
		pitch /= 4;
	if (pitch > MAXSHORT) {
		DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n",
		     __FUNCTION__, bo->handle, pitch));
		return false;
	}

	return true;
}

static inline bool kgem_bo_can_blt(struct kgem *kgem,
				   struct kgem_bo *bo)
{
	if (bo->tiling == I915_TILING_Y) {
		DBG(("%s: can not blt to handle=%d, tiling=Y\n",
		     __FUNCTION__, bo->handle));
		return false;
	}

	return kgem_bo_blt_pitch_is_ok(kgem, bo);
}
*/

static inline bool __kgem_bo_is_mappable(struct kgem *kgem,
					 struct kgem_bo *bo)
{
	if (bo->domain == DOMAIN_GTT)
		return true;

	if (kgem->gen < 040 && bo->tiling &&
	    bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1))
		return false;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	if (kgem->has_llc && bo->tiling == I915_TILING_NONE)
		return true;

	if (!bo->presumed_offset)
		return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;

	return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable;
}

static inline bool kgem_bo_is_mappable(struct kgem *kgem,
				       struct kgem_bo *bo)
{
	DBG(("%s: domain=%d, offset: %d size: %d\n",
	     __FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo)));
	assert(bo->refcnt);
	return __kgem_bo_is_mappable(kgem, bo);
}

static inline bool kgem_bo_mapped(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: map=%p, tiling=%d, domain=%d\n",
	     __FUNCTION__, bo->map, bo->tiling, bo->domain));
	assert(bo->refcnt);

	if (bo->map == NULL)
		return bo->tiling == I915_TILING_NONE && bo->domain == DOMAIN_CPU;

	return IS_CPU_MAP(bo->map) == !bo->tiling;
}

static inline bool kgem_bo_can_map(struct kgem *kgem, struct kgem_bo *bo)
{
	if (kgem_bo_mapped(kgem, bo))
		return true;

	if (!bo->tiling && (kgem->has_llc || bo->domain == DOMAIN_CPU))
		return true;

	if (kgem->gen == 021 && bo->tiling == I915_TILING_Y)
		return false;

	return kgem_bo_size(bo) <= kgem->aperture_mappable / 4;
}

static inline bool kgem_bo_can_map__cpu(struct kgem *kgem,
					struct kgem_bo *bo,
					bool write)
{
	if (bo->purged || (bo->scanout && write))
		return false;

	if (kgem->has_llc)
		return true;

	if (bo->domain != DOMAIN_CPU)
		return false;

	return !write || bo->exec == NULL;
}

static inline bool kgem_bo_is_snoop(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	while (bo->proxy)
		bo = bo->proxy;
	return bo->snoop;
}

void kgem_bo_undo(struct kgem *kgem, struct kgem_bo *bo);

bool __kgem_busy(struct kgem *kgem, int handle);

static inline void kgem_bo_mark_busy(struct kgem_bo *bo, int ring)
{
	bo->rq = (struct kgem_request *)((uintptr_t)bo->rq | ring);
}

inline static void __kgem_bo_clear_busy(struct kgem_bo *bo)
{
	bo->rq = NULL;
	list_del(&bo->request);

	bo->domain = DOMAIN_NONE;
	bo->needs_flush = false;
	bo->gtt_dirty = false;
}

static inline bool kgem_bo_is_busy(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);
	return bo->rq;
}

static inline bool __kgem_bo_is_busy(struct kgem *kgem, struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, domain: %d exec? %d, rq? %d\n", __FUNCTION__,
	     bo->handle, bo->domain, bo->exec != NULL, bo->rq != NULL));
	assert(bo->refcnt);

	if (bo->exec)
		return true;

	if (kgem_flush(kgem, bo->flush))
		kgem_submit(kgem);

	if (bo->rq && !__kgem_busy(kgem, bo->handle))
		__kgem_bo_clear_busy(bo);

	return kgem_bo_is_busy(bo);
}

static inline bool kgem_bo_is_render(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d, rq? %d [%d]\n", __FUNCTION__,
	     bo->handle, bo->rq != NULL, (int)RQ_RING(bo->rq)));
	assert(bo->refcnt);
	return bo->rq && RQ_RING(bo->rq) == I915_EXEC_RENDER;
}

static inline void kgem_bo_mark_unreusable(struct kgem_bo *bo)
{
	while (bo->proxy) {
		bo->flush = true;
		bo = bo->proxy;
	}
	bo->flush = true;
	bo->reusable = false;
}

static inline bool kgem_bo_is_dirty(struct kgem_bo *bo)
{
	if (bo == NULL)
		return false;

	assert(bo->refcnt);
	return bo->gpu_dirty;
}

static inline void kgem_bo_unclean(struct kgem *kgem, struct kgem_bo *bo)
{
	/* The bo is outside of our control, so presume it is written to */
	bo->needs_flush = true;
	if (bo->rq == NULL)
		bo->rq = (void *)kgem;

	if (bo->domain != DOMAIN_GPU)
		bo->domain = DOMAIN_NONE;
}

static inline void __kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	DBG(("%s: handle=%d (proxy? %d)\n", __FUNCTION__,
	     bo->handle, bo->proxy != NULL));

	bo->exec->flags |= LOCAL_EXEC_OBJECT_WRITE;
	bo->needs_flush = bo->gpu_dirty = true;
	list_move(&bo->request, &RQ(bo->rq)->buffers);
}

static inline void kgem_bo_mark_dirty(struct kgem_bo *bo)
{
	assert(bo->refcnt);
	do {
		assert(bo->exec);
		assert(bo->rq);

		if (bo->gpu_dirty)
			return;

		__kgem_bo_mark_dirty(bo);
	} while ((bo = bo->proxy));
}

#define KGEM_BUFFER_WRITE	0x1
#define KGEM_BUFFER_INPLACE	0x2
#define KGEM_BUFFER_LAST	0x4

#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE)
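/*
 * Upload buffers: kgem_create_buffer()/kgem_create_buffer_2d() below return
 * a proxy bo together with a CPU pointer for streaming pixel data.
 * KGEM_BUFFER_WRITE indicates the caller will fill the buffer through that
 * pointer, and KGEM_BUFFER_INPLACE requests a mapping the GPU can consume
 * directly so the data does not need a separate staging copy
 * (KGEM_BUFFER_WRITE_INPLACE combines the two).
 */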

struct kgem_bo *kgem_create_buffer(struct kgem *kgem,
				   uint32_t size, uint32_t flags,
				   void **ret);
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem,
				      int width, int height, int bpp,
				      uint32_t flags,
				      void **ret);
bool kgem_buffer_is_inplace(struct kgem_bo *bo);
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo);

void kgem_throttle(struct kgem *kgem);
#define MAX_INACTIVE_TIME 10
bool kgem_expire_cache(struct kgem *kgem);
void kgem_purge_cache(struct kgem *kgem);
void kgem_cleanup_cache(struct kgem *kgem);

void kgem_clean_scanout_cache(struct kgem *kgem);
void kgem_clean_large_cache(struct kgem *kgem);

#if HAS_DEBUG_FULL
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch);
#else
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch)
{
	(void)kgem;
	(void)nbatch;
}
#endif

#endif /* KGEM_H */