/* -*- c-basic-offset: 4 -*- */
/*
 * Copyright © 2006 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eric Anholt
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

/* C library headers used below: assert(), printf()/fopen()/FILE, and EIO. */
#include <assert.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

//#include "xf86.h"
#include "intel.h"
#include "i830_reg.h"
#include "i915_drm.h"
#include "i965_reg.h"

//#include "uxa.h"

#define DUMP_BATCHBUFFERS NULL // "/tmp/i915-batchbuffers.dump"

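/* Flush any vertex data accumulated in the CPU-side staging buffer into the
 * current vertex bo, then drop our reference to the bo.  vertex_id is
 * cleared so (presumably) the next user re-emits its vertex state. */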
static void intel_end_vertex(intel_screen_private *intel)
{
	if (intel->vertex_bo) {
		if (intel->vertex_used) {
			dri_bo_subdata(intel->vertex_bo, 0, intel->vertex_used*4, intel->vertex_ptr);
			intel->vertex_used = 0;
		}

		dri_bo_unreference(intel->vertex_bo);
		intel->vertex_bo = NULL;
	}

	intel->vertex_id = 0;
}

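/* Retire the current vertex bo and allocate a fresh one.  The size comes
 * from sizeof(intel->vertex_ptr), which assumes vertex_ptr is the fixed-size
 * staging array that intel_end_vertex() uploads from, not a bare pointer. */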
void intel_next_vertex(intel_screen_private *intel)
{
	intel_end_vertex(intel);

	intel->vertex_bo =
		dri_bo_alloc(intel->bufmgr, "vertex", sizeof (intel->vertex_ptr), 4096);
}

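/* Allocate a page-aligned bo to back a batch buffer. */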
static dri_bo *bo_alloc()
{
	intel_screen_private *intel = intel_get_screen_private();
	int size = 4 * 4096;
	/* The 865 has issues with larger-than-page-sized batch buffers. */
	if (IS_I865G(intel))
		size = 4096;
	return dri_bo_alloc(intel->bufmgr, "batch", size, 4096);
}

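/* Swap the just-submitted batch bo with the spare kept for this ring
 * (mode is 0 for the render ring, non-zero for the BLT ring) and reset the
 * emit offset.  Clearing the old bo's relocation list lets it be reused
 * for a later batch. */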
static void intel_next_batch(int mode)
{
	intel_screen_private *intel = intel_get_screen_private();
	dri_bo *tmp;

	drm_intel_gem_bo_clear_relocs(intel->batch_bo, 0);

	tmp = intel->last_batch_bo[mode];
	intel->last_batch_bo[mode] = intel->batch_bo;
	intel->batch_bo = tmp;

	intel->batch_used = 0;

	/* We don't know when another client has executed, so we have
	 * to reinitialize our 3D state per batch.
	 */
	intel->last_3d = LAST_3D_OTHER;
}

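/* Allocate the active batch bo plus one spare per ring and reset the
 * bookkeeping used while emitting commands. */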
void intel_batch_init()
{
	intel_screen_private *intel = intel_get_screen_private();

    ENTER();

	intel->batch_emit_start = 0;
	intel->batch_emitting = 0;
	intel->vertex_id = 0;

	intel->last_batch_bo[0] = bo_alloc();
	intel->last_batch_bo[1] = bo_alloc();

	intel->batch_bo = bo_alloc();
	intel->batch_used = 0;
	intel->last_3d = LAST_3D_OTHER;

    LEAVE();
}

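/* Drop every bo owned by the batch machinery and empty the list of pixmaps
 * still tied to the current batch. */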
void intel_batch_teardown()
{
	intel_screen_private *intel = intel_get_screen_private();
	int i;

	for (i = 0; i < ARRAY_SIZE(intel->last_batch_bo); i++) {
		if (intel->last_batch_bo[i] != NULL) {
			dri_bo_unreference(intel->last_batch_bo[i]);
			intel->last_batch_bo[i] = NULL;
		}
	}

	if (intel->batch_bo != NULL) {
		dri_bo_unreference(intel->batch_bo);
		intel->batch_bo = NULL;
	}

	if (intel->vertex_bo) {
		dri_bo_unreference(intel->vertex_bo);
		intel->vertex_bo = NULL;
	}

	while (!list_is_empty(&intel->batch_pixmaps))
		list_del(intel->batch_pixmaps.next);
}

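/* A flush is being emitted, so every pixmap written by this batch can be
 * marked clean again. */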
static void intel_batch_do_flush()
{
	intel_screen_private *intel = intel_get_screen_private();
	struct intel_pixmap *priv;

	list_for_each_entry(priv, &intel->batch_pixmaps, batch)
		priv->dirty = 0;
}

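/* Sandybridge flush workaround: before the real render-cache flush we emit
 * a CS stall and a "post-sync non-zero" qword write to a scratch bo, which
 * the hardware reportedly requires for PIPE_CONTROL to behave. */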
static void intel_emit_post_sync_nonzero_flush()
{
	intel_screen_private *intel = intel_get_screen_private();

	/* keep this entire sequence of 3 PIPE_CONTROL cmds in one batch to
	 * avoid upsetting the gpu. */
	BEGIN_BATCH(3*4);
	OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
	OUT_BATCH(BRW_PIPE_CONTROL_CS_STALL |
		  BRW_PIPE_CONTROL_STALL_AT_SCOREBOARD);
	OUT_BATCH(0); /* address */
	OUT_BATCH(0); /* write data */

	OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
	OUT_BATCH(BRW_PIPE_CONTROL_WRITE_QWORD);
	OUT_RELOC(intel->wa_scratch_bo,
		  I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, 0);
	OUT_BATCH(0); /* write data */

	/* now finally the _real flush */
	OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
	OUT_BATCH(BRW_PIPE_CONTROL_WC_FLUSH |
		  BRW_PIPE_CONTROL_TC_FLUSH |
		  BRW_PIPE_CONTROL_NOWRITE);
	OUT_BATCH(0); /* write address */
	OUT_BATCH(0); /* write data */
	ADVANCE_BATCH();
}

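/* Emit a cache flush suited to the current generation and ring: MI_FLUSH_DW
 * on the BLT ring, PIPE_CONTROL (with the Sandybridge workaround) on the
 * gen6+ render ring, and plain MI_FLUSH on earlier hardware. */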
184
void intel_batch_emit_flush()
185
{
186
	intel_screen_private *intel = intel_get_screen_private();
187
	int flags;
188
 
189
	assert (!intel->in_batch_atomic);
190
 
191
	/* Big hammer, look to the pipelined flushes in future. */
192
	if ((INTEL_INFO(intel)->gen >= 060)) {
193
		if (intel->current_batch == BLT_BATCH) {
194
			BEGIN_BATCH_BLT(4);
195
			OUT_BATCH(MI_FLUSH_DW | 2);
196
			OUT_BATCH(0);
197
			OUT_BATCH(0);
198
			OUT_BATCH(0);
199
			ADVANCE_BATCH();
200
		} else  {
201
			if ((INTEL_INFO(intel)->gen == 060)) {
202
				/* HW-Workaround for Sandybdrige */
203
				intel_emit_post_sync_nonzero_flush();
204
			} else {
205
				BEGIN_BATCH(4);
206
				OUT_BATCH(BRW_PIPE_CONTROL | (4 - 2));
207
				OUT_BATCH(BRW_PIPE_CONTROL_WC_FLUSH |
208
					  BRW_PIPE_CONTROL_TC_FLUSH |
209
					  BRW_PIPE_CONTROL_NOWRITE);
210
				OUT_BATCH(0); /* write address */
211
				OUT_BATCH(0); /* write data */
212
				ADVANCE_BATCH();
213
			}
214
		}
215
	} else {
216
		flags = MI_WRITE_DIRTY_STATE | MI_INVALIDATE_MAP_CACHE;
217
		if (INTEL_INFO(intel)->gen >= 040)
218
			flags = 0;
219
 
220
		BEGIN_BATCH(1);
221
		OUT_BATCH(MI_FLUSH | flags);
222
		ADVANCE_BATCH();
223
	}
224
	intel_batch_do_flush();
225
}
226
 
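/* Finish the current batch: flush pending vertices, terminate the command
 * stream, upload it into the batch bo and execute it on the ring selected
 * by current_batch.  On failure, acceleration is disabled rather than
 * retrying the submission. */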
227
void intel_batch_submit()
228
{
229
	intel_screen_private *intel = intel_get_screen_private();
230
	int ret;
231
 
232
	assert (!intel->in_batch_atomic);
233
 
234
	if (intel->vertex_flush)
235
		intel->vertex_flush(intel);
236
	intel_end_vertex(intel);
237
 
238
	if (intel->batch_flush)
239
		intel->batch_flush(intel);
240
 
241
	if (intel->batch_used == 0)
242
		return;
243
 
244
	/* Mark the end of the batchbuffer. */
245
	OUT_BATCH(MI_BATCH_BUFFER_END);
246
	/* Emit a padding dword if we aren't going to be quad-word aligned. */
247
	if (intel->batch_used & 1)
248
		OUT_BATCH(MI_NOOP);
249
 
250
	if (DUMP_BATCHBUFFERS) {
251
	    FILE *file = fopen(DUMP_BATCHBUFFERS, "a");
252
	    if (file) {
253
		fwrite (intel->batch_ptr, intel->batch_used*4, 1, file);
254
		fclose(file);
255
	    }
256
	}
257
 
258
	ret = dri_bo_subdata(intel->batch_bo, 0, intel->batch_used*4, intel->batch_ptr);
259
	if (ret == 0) {
260
		ret = drm_intel_bo_mrb_exec(intel->batch_bo,
261
				intel->batch_used*4,
262
				NULL, 0, 0xffffffff,
263
				(HAS_BLT(intel) ?
264
				 intel->current_batch:
265
				 I915_EXEC_DEFAULT));
266
	}
267
 
268
	if (ret != 0) {
269
		static int once;
270
		if (!once) {
271
			if (ret == -EIO) {
272
				/* The GPU has hung and unlikely to recover by this point. */
273
				printf("Detected a hung GPU, disabling acceleration.\n");
274
				printf("When reporting this, please include i915_error_state from debugfs and the full dmesg.\n");
275
			} else {
276
				/* The driver is broken. */
277
				printf("Failed to submit batch buffer, expect rendering corruption\n ");
278
			}
279
//			uxa_set_force_fallback(xf86ScrnToScreen(scrn), TRUE);
280
			intel->force_fallback = TRUE;
281
			once = 1;
282
		}
283
	}
284
 
285
	while (!list_is_empty(&intel->batch_pixmaps)) {
286
		struct intel_pixmap *entry;
287
 
288
		entry = list_first_entry(&intel->batch_pixmaps,
289
					 struct intel_pixmap,
290
					 batch);
291
 
292
		entry->busy = -1;
293
		entry->dirty = 0;
294
		list_del(&entry->batch);
295
	}
296
 
297
	if (intel->debug_flush & DEBUG_FLUSH_WAIT)
298
		drm_intel_bo_wait_rendering(intel->batch_bo);
299
 
300
	intel_next_batch(intel->current_batch == I915_EXEC_BLT);
301
 
302
	if (intel->batch_commit_notify)
303
		intel->batch_commit_notify(intel);
304
 
305
	intel->current_batch = 0;
306
}
307
 
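/* Debug hook: depending on the bits set in intel->debug_flush, force a
 * cache flush and/or an immediate batch submission. */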
void intel_debug_flush()
{
	intel_screen_private *intel = intel_get_screen_private();

	if (intel->debug_flush & DEBUG_FLUSH_CACHES)
		intel_batch_emit_flush();

	if (intel->debug_flush & DEBUG_FLUSH_BATCHES)
		intel_batch_submit();
}