Subversion Repositories Kolibri OS

Rev

Rev 4377 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 4377 Rev 4501
1
/**************************************************************************
1
/**************************************************************************
2
 
2
 
3
Copyright 2001 VA Linux Systems Inc., Fremont, California.
3
Copyright 2001 VA Linux Systems Inc., Fremont, California.
4
Copyright © 2002 by David Dawes
4
Copyright © 2002 by David Dawes
5
 
5
 
6
All Rights Reserved.
6
All Rights Reserved.
7
 
7
 
8
Permission is hereby granted, free of charge, to any person obtaining a
8
Permission is hereby granted, free of charge, to any person obtaining a
9
copy of this software and associated documentation files (the "Software"),
9
copy of this software and associated documentation files (the "Software"),
10
to deal in the Software without restriction, including without limitation
10
to deal in the Software without restriction, including without limitation
11
on the rights to use, copy, modify, merge, publish, distribute, sub
11
on the rights to use, copy, modify, merge, publish, distribute, sub
12
license, and/or sell copies of the Software, and to permit persons to whom
12
license, and/or sell copies of the Software, and to permit persons to whom
13
the Software is furnished to do so, subject to the following conditions:
13
the Software is furnished to do so, subject to the following conditions:
14
 
14
 
15
The above copyright notice and this permission notice (including the next
15
The above copyright notice and this permission notice (including the next
16
paragraph) shall be included in all copies or substantial portions of the
16
paragraph) shall be included in all copies or substantial portions of the
17
Software.
17
Software.
18
 
18
 
19
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
23
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25
USE OR OTHER DEALINGS IN THE SOFTWARE.
25
USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 
26
 
27
**************************************************************************/
27
**************************************************************************/
28
 
28
 
29
/*
29
/*
30
 * Authors: Jeff Hartmann 
30
 * Authors: Jeff Hartmann 
31
 *          Abraham van der Merwe 
31
 *          Abraham van der Merwe 
32
 *          David Dawes 
32
 *          David Dawes 
33
 *          Alan Hourihane 
33
 *          Alan Hourihane 
34
 */
34
 */
35
 
35
 
36
#ifdef HAVE_CONFIG_H
36
#ifdef HAVE_CONFIG_H
37
#include "config.h"
37
#include "config.h"
38
#endif
38
#endif
39
 
39
 
40
#include 
40
#include 
41
#include 
41
#include 
42
#include "i915_pciids.h"
42
#include "i915_pciids.h"
43
 
43
 
44
#include "compiler.h"
44
#include "compiler.h"
45
#include "sna.h"
45
#include "sna.h"
46
#include "sna_reg.h"
46
#include "sna_reg.h"
47
 
47
 
48
#include 
48
#include 
49
#include "../pixdriver.h"
49
#include "../pixdriver.h"
50
 
50
 
51
#include 
51
#include 
52
 
52
 
53
#define to_surface(x) (surface_t*)((x)->handle)
53
#define to_surface(x) (surface_t*)((x)->handle)
54
 
54
 
55
/* Axis-aligned rectangle given by its edges (left/top/right/bottom). */
typedef struct {
    int l;  /* left edge */
    int t;  /* top edge */
    int r;  /* right edge */
    int b;  /* bottom edge */
} rect_t;
61
 
61
 
62
static struct sna_fb sna_fb;
62
static struct sna_fb sna_fb;
63
static int    tls_mask;
63
static int    tls_mask;
64
 
64
 
65
int tls_alloc(void);
65
int tls_alloc(void);
66
 
66
 
67
/*
 * Read the thread-local slot at byte offset @key: returns the 32-bit value
 * stored at %fs:key (the FS segment holds the TLS block on this platform).
 * No validation of @key is performed here, unlike tls_set() below.
 */
static inline void *tls_get(int key)
{
    void *val;
    __asm__ __volatile__(
    "movl %%fs:(%1), %0"
    :"=r"(val)
    :"r"(key));

  return val;
};
77
 
77
 
78
/*
 * Store @ptr into the thread-local slot at byte offset @key (%fs:key).
 * @key must be 4-byte aligned; returns 0 on success, -1 when the offset
 * is misaligned.
 */
static inline int
tls_set(int key, const void *ptr)
{
    if(!(key & 3))
    {
        __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        ::"r"(ptr),"r"(key));
        return 0;
    }
    else return -1;
}
90
 
90
 
91
 
91
 
92
 
92
 
93
 
93
 
94
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
94
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
95
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
95
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
96
uint32_t kgem_surface_size(struct kgem *kgem,bool relaxed_fencing,
96
uint32_t kgem_surface_size(struct kgem *kgem,bool relaxed_fencing,
97
				  unsigned flags, uint32_t width, uint32_t height,
97
				  unsigned flags, uint32_t width, uint32_t height,
98
				  uint32_t bpp, uint32_t tiling, uint32_t *pitch);
98
				  uint32_t bpp, uint32_t tiling, uint32_t *pitch);
99
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
99
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
100
                        int pitch, int height);
100
                        int pitch, int height);
101
 
101
 
102
void kgem_close_batches(struct kgem *kgem);
102
void kgem_close_batches(struct kgem *kgem);
103
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
103
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
104
 
104
 
105
 
105
 
106
static bool sna_solid_cache_init(struct sna *sna);
106
static bool sna_solid_cache_init(struct sna *sna);
107
 
107
 
108
struct sna *sna_device;
108
struct sna *sna_device;
109
 
109
 
110
__LOCK_INIT_RECURSIVE(, __sna_lock);
110
__LOCK_INIT_RECURSIVE(, __sna_lock);
111
 
111
 
112
/* No-op render-state reset hook for the software-fallback backend. */
static void no_render_reset(struct sna *sna)
{
	(void)sna;
}
116
 
116
 
117
/* No-op render flush hook for the software-fallback backend. */
static void no_render_flush(struct sna *sna)
{
	(void)sna;
}
121
 
121
 
122
/*
 * Ring-switch hook for the no-render backend.  If a batch is pending and
 * the current ring has gone idle, submit the batch immediately; otherwise
 * do nothing.  @new_mode is ignored by this backend.
 */
static void
no_render_context_switch(struct kgem *kgem,
			 int new_mode)
{
	/* Nothing queued: nothing to flush. */
	if (!kgem->nbatch)
		return;

	if (kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
		_kgem_submit(kgem);
	}

	(void)new_mode;
}
136
 
136
 
137
/* No-op retire hook for the software-fallback backend. */
static void
no_render_retire(struct kgem *kgem)
{
	(void)kgem;
}
142
 
142
 
143
/* No-op expire hook for the software-fallback backend. */
static void
no_render_expire(struct kgem *kgem)
{
	(void)kgem;
}
148
 
148
 
149
/* No-op teardown hook for the software-fallback backend. */
static void
no_render_fini(struct sna *sna)
{
	(void)sna;
}
154
 
154
 
155
const char *no_render_init(struct sna *sna)
155
const char *no_render_init(struct sna *sna)
156
{
156
{
157
    struct sna_render *render = &sna->render;
157
    struct sna_render *render = &sna->render;
158
 
158
 
159
    memset (render,0, sizeof (*render));
159
    memset (render,0, sizeof (*render));
160
 
160
 
161
    render->prefer_gpu = PREFER_GPU_BLT;
161
    render->prefer_gpu = PREFER_GPU_BLT;
162
 
162
 
163
    render->vertices = render->vertex_data;
163
    render->vertices = render->vertex_data;
164
    render->vertex_size = ARRAY_SIZE(render->vertex_data);
164
    render->vertex_size = ARRAY_SIZE(render->vertex_data);
165
 
165
 
166
    render->reset = no_render_reset;
166
    render->reset = no_render_reset;
167
	render->flush = no_render_flush;
167
	render->flush = no_render_flush;
168
	render->fini = no_render_fini;
168
	render->fini = no_render_fini;
169
 
169
 
170
	sna->kgem.context_switch = no_render_context_switch;
170
	sna->kgem.context_switch = no_render_context_switch;
171
	sna->kgem.retire = no_render_retire;
171
	sna->kgem.retire = no_render_retire;
172
	sna->kgem.expire = no_render_expire;
172
	sna->kgem.expire = no_render_expire;
173
 
173
 
174
	sna->kgem.mode = KGEM_RENDER;
174
	sna->kgem.mode = KGEM_RENDER;
175
	sna->kgem.ring = KGEM_RENDER;
175
	sna->kgem.ring = KGEM_RENDER;
176
 
176
 
177
	sna_vertex_init(sna);
177
	sna_vertex_init(sna);
178
	return "generic";
178
	return "generic";
179
 }
179
 }
180
 
180
 
181
/*
 * Reset the render vertex state.  The pthread mutex/condvar setup used by
 * the upstream driver is disabled here; only the active counter is cleared.
 */
void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}
187
 
187
 
188
/*
 * Initialise 2D acceleration: select a render backend matching the GPU
 * generation (gen values are octal-encoded, e.g. 070 = gen7; gen >= 0100
 * keeps the generic no-render backend), reset the batch state, publish
 * the device through the global sna_device, and attach the framebuffer.
 * Returns the result of kgem_init_fb().
 */
int sna_accel_init(struct sna *sna)
{
    const char *backend;

	backend = no_render_init(sna);
	if (sna->info->gen >= 0100)
		(void)backend;
	else if (sna->info->gen >= 070)
		backend = gen7_render_init(sna, backend);
	else if (sna->info->gen >= 060)
		backend = gen6_render_init(sna, backend);
	else if (sna->info->gen >= 050)
		backend = gen5_render_init(sna, backend);
	else if (sna->info->gen >= 040)
		backend = gen4_render_init(sna, backend);
	else if (sna->info->gen >= 030)
		backend = gen3_render_init(sna, backend);

	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
	     __FUNCTION__, backend, sna->render.prefer_gpu));

	kgem_reset(&sna->kgem);

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}
215
 
215
 
216
 
216
 
217
#if 0
217
#if 0
218
 
218
 
219
/*
 * (Compiled out by the surrounding "#if 0".)
 * Create the solid-colour cache: a single linear BO large enough for the
 * whole colour array, with slot 0 pre-filled with opaque white.
 * Returns TRUE on success, FALSE if the backing BO cannot be allocated.
 */
static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}
243
 
243
 
244
/*
 * (Compiled out by the surrounding "#if 0".)
 * Write the accumulated colour dwords back into the cache BO, then clear
 * the dirty flag and reset the last-hit index.  Must only be called while
 * the cache is dirty and non-empty (asserted).
 */
void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}
258
 
258
 
259
/*
 * (Compiled out by the surrounding "#if 0".)
 * Retire the current solid-colour cache BO and start a fresh one.  Unless
 * @force is set, this is skipped while the cache BO is not in the GPU
 * domain.  Pending colours are flushed first, every per-slot proxy BO is
 * destroyed, and a new cache BO with a fresh white slot 0 is created.
 * Only @force resets the slot count back to 1.
 */
static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}
291
 
291
 
292
 
292
 
293
/*
 * (Compiled out by the surrounding "#if 0".)
 * Return a referenced 1x1 proxy BO holding @color.  Lookup order: white
 * fast-path (slot 0), the most recently used slot, then a linear scan.  On
 * a scan hit with a destroyed proxy the slot is recreated ("create:"); on
 * a miss the cache is finished (forcing a reset only when full), a new
 * slot is appended and marked dirty, then its proxy is created.  Both
 * paths fall through to "done:", which records the slot as last-used.
 */
struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}
345
 
345
 
346
#endif
346
#endif
347
 
347
 
348
 
348
 
349
/**
 * sna_blit_copy - blit a rectangle from a client bitmap to the framebuffer.
 *
 * @src_bitmap: source bitmap; its ->handle carries the source kgem_bo.
 * @dst_x, @dst_y: destination position in window coordinates.
 * @w, @h: width and height of the copied rectangle.
 * @src_x, @src_y: origin of the rectangle within the source bitmap.
 *
 * The current window origin is read from the process info block (offsets
 * 34/38) and added to the destination so the copy lands at the correct
 * spot in the global framebuffer.  The batch is submitted before
 * returning.  Always returns 0.
 *
 * Fix: every "&copy" address-of expression had been corrupted into the
 * literal character '©' (HTML-entity mangling), which does not compile;
 * restored here.  Also removed a stray ';' after the closing brace.
 */
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    /* Window origin packed inside the process info block. */
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    /* Push the batch so the blit actually reaches the hardware. */
    kgem_submit(&sna_device->kgem);

    return 0;
}
395
 
395
 
396
/*
 * Pixel surface descriptor handed around via bitmap handles
 * (see the to_surface() macro above).
 */
typedef struct
{
    uint32_t        width;   /* presumably in pixels — confirm against callers */
    uint32_t        height;
    void           *data;    /* pixel data pointer */
    uint32_t        pitch;   /* bytes per scanline, presumably — confirm */
    struct kgem_bo *bo;      /* backing GPU buffer object */
    uint32_t        bo_size; /* size of @bo in bytes */
    uint32_t        flags;
}surface_t;
406
 
406
 
407
 
407
 
408
 
408
 
409
 
409
 
410
#define MI_LOAD_REGISTER_IMM		(0x22<<23)
410
#define MI_LOAD_REGISTER_IMM		(0x22<<23)
411
#define MI_WAIT_FOR_EVENT			(0x03<<23)
411
#define MI_WAIT_FOR_EVENT			(0x03<<23)
412
 
412
 
413
/*
 * Haswell variant: emit a 17-dword batch sequence that makes the GPU wait
 * until the scanline of display @pipe lies between @y1 and @y2.  Writes
 * privileged registers (DERRMR 0x44050, FORCEWAKE_MT 0xa188), so it
 * requires secure batches and sets I915_EXEC_SECURE on success.
 * Returns false if secure batches are unavailable.
 * (@crtc and @full_height are unused in this variant.)
 */
static bool sna_emit_wait_for_scanline_hsw(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t event;
    uint32_t *b;

    if (!sna->kgem.has_secure_batches)
        return false;

    /* Reserve the 17 dwords written below. */
    b = kgem_get_batch(&sna->kgem);
    sna->kgem.nbatch += 17;

    /* Per-pipe scanline event bit for the DERRMR mask. */
    switch (pipe) {
    default: assert(0);
    case 0: event = 1 << 0; break;
    case 1: event = 1 << 8; break;
    case 2: event = 1 << 14; break;
    }

    /* Unmask only our event in DERRMR and force the wells awake. */
    b[0] = MI_LOAD_REGISTER_IMM | 1;
    b[1] = 0x44050; /* DERRMR */
    b[2] = ~event;
    b[3] = MI_LOAD_REGISTER_IMM | 1;
    b[4] = 0xa188; /* FORCEWAKE_MT */
    b[5] = 2 << 16 | 2;

    /* The documentation says that the LOAD_SCAN_LINES command
     * always comes in pairs. Don't ask me why. */
    switch (pipe) {
    default: assert(0);
    case 0: event = 0 << 19; break;
    case 1: event = 1 << 19; break;
    case 2: event = 4 << 19; break;
    }
    b[8] = b[6] = MI_LOAD_SCAN_LINES_INCL | event;
    b[9] = b[7] = (y1 << 16) | (y2-1);

    /* Re-derive the wait-event bit (same encoding as the DERRMR mask). */
    switch (pipe) {
    default: assert(0);
    case 0: event = 1 << 0; break;
    case 1: event = 1 << 8; break;
    case 2: event = 1 << 14; break;
    }
    b[10] = MI_WAIT_FOR_EVENT | event;

    /* Restore FORCEWAKE_MT and re-mask all DERRMR events. */
    b[11] = MI_LOAD_REGISTER_IMM | 1;
    b[12] = 0xa188; /* FORCEWAKE_MT */
    b[13] = 2 << 16;
    b[14] = MI_LOAD_REGISTER_IMM | 1;
    b[15] = 0x44050; /* DERRMR */
    b[16] = ~0;

    sna->kgem.batch_flags |= I915_EXEC_SECURE;
    return true;
}
470
 
470
 
471
 
471
 
472
/*
 * Ivybridge/Valleyview variant: emit a batch sequence that stalls the GPU
 * until the scanline of @pipe is inside [@y1, @y2).  Writes privileged
 * registers, so secure batches are required; sets I915_EXEC_SECURE on
 * success and returns false when unavailable.  @crtc->b supplies the
 * wrap-around scanline when y1 underflows; @full_height is unused here.
 */
static bool sna_emit_wait_for_scanline_ivb(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t *b;
    uint32_t event;
    uint32_t forcewake;

    if (!sna->kgem.has_secure_batches)
        return false;

    assert(y1 >= 0);
    assert(y2 > y1);
    assert(sna->kgem.mode);

    /* Always program one less than the desired value */
    if (--y1 < 0)
        y1 = crtc->b;
    y2--;

    /* Per-pipe event bit; full-height waits use a different bit. */
    switch (pipe) {
    default:
        assert(0);
    case 0:
        event = 1 << (full_height ? 3 : 0);
        break;
    case 1:
        event = 1 << (full_height ? 11 : 8);
        break;
    case 2:
        event = 1 << (full_height ? 21 : 14);
        break;
    }

    /* Valleyview (gen 7.1, octal 071) has its forcewake elsewhere. */
    if (sna->kgem.gen == 071)
        forcewake = 0x1300b0; /* FORCEWAKE_VLV */
    else
        forcewake = 0xa188; /* FORCEWAKE_MT */

    b = kgem_get_batch(&sna->kgem);

    /* Both the LRI and WAIT_FOR_EVENT must be in the same cacheline */
    if (((sna->kgem.nbatch + 6) >> 4) != (sna->kgem.nbatch + 10) >> 4) {
        int dw = sna->kgem.nbatch + 6;
        dw = ALIGN(dw, 16) - dw;
        while (dw--)
            *b++ = MI_NOOP;
    }

    /* Unmask our event, wake the wells, program the scanline window,
     * wait, then restore FORCEWAKE and DERRMR. */
    b[0] = MI_LOAD_REGISTER_IMM | 1;
    b[1] = 0x44050; /* DERRMR */
    b[2] = ~event;
    b[3] = MI_LOAD_REGISTER_IMM | 1;
    b[4] = forcewake;
    b[5] = 2 << 16 | 2;
    b[6] = MI_LOAD_REGISTER_IMM | 1;
    b[7] = 0x70068 + 0x1000 * pipe;
    b[8] = (1 << 31) | (1 << 30) | (y1 << 16) | y2;
    b[9] = MI_WAIT_FOR_EVENT | event;
    b[10] = MI_LOAD_REGISTER_IMM | 1;
    b[11] = forcewake;
    b[12] = 2 << 16;
    b[13] = MI_LOAD_REGISTER_IMM | 1;
    b[14] = 0x44050; /* DERRMR */
    b[15] = ~0;

    /* Account for both the alignment NOOPs and the 16 dwords above. */
    sna->kgem.nbatch = b - sna->kgem.batch + 16;

    sna->kgem.batch_flags |= I915_EXEC_SECURE;
    return true;
}
544
 
544
 
545
 
545
 
546
/*
 * Sandybridge variant: emit a 10-dword sequence that stalls the GPU until
 * the scanline of @pipe is inside the (8-line granular) window [@y1, @y2).
 * Note the secure-batch availability check is commented out in this port.
 * Returns false when the masked window collapses to zero lines; otherwise
 * sets I915_EXEC_SECURE and returns true.  @crtc->b supplies the
 * wrap-around scanline when y1 underflows.
 */
static bool sna_emit_wait_for_scanline_gen6(struct sna *sna,
                        rect_t *crtc,
					    int pipe, int y1, int y2,
					    bool full_height)
{
	uint32_t *b;
	uint32_t event;

//	if (!sna->kgem.has_secure_batches)
//		return false;

	assert(y1 >= 0);
	assert(y2 > y1);
	assert(sna->kgem.mode == KGEM_RENDER);

	/* Always program one less than the desired value */
	if (--y1 < 0)
		y1 = crtc->b;
	y2--;

	/* The scanline granularity is 3 bits */
	y1 &= ~7;
	y2 &= ~7;
	if (y2 == y1)
		return false;

	event = 1 << (3*full_height + pipe*8);

	/* Reserve the 10 dwords written below. */
	b = kgem_get_batch(&sna->kgem);
	sna->kgem.nbatch += 10;

	b[0] = MI_LOAD_REGISTER_IMM | 1;
	b[1] = 0x44050; /* DERRMR */
	b[2] = ~event;
	b[3] = MI_LOAD_REGISTER_IMM | 1;
	b[4] = 0x4f100; /* magic */
	b[5] = (1 << 31) | (1 << 30) | pipe << 29 | (y1 << 16) | y2;
	b[6] = MI_WAIT_FOR_EVENT | event;
	b[7] = MI_LOAD_REGISTER_IMM | 1;
	b[8] = 0x44050; /* DERRMR */
	b[9] = ~0;

	sna->kgem.batch_flags |= I915_EXEC_SECURE;

	return true;
}
592
 
592
 
593
/* Wait for the scanline window on gen4 hardware using the
 * MI_LOAD_SCAN_LINES + MI_WAIT_FOR_EVENT commands; a full-height update
 * waits on the svblank event instead of the scan-line window.
 * Always emits the wait and returns true. */
static bool sna_emit_wait_for_scanline_gen4(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t event;
    uint32_t *b;

    /* Pick the per-pipe wait condition. */
    if (pipe == 0) {
        if (full_height)
            event = MI_WAIT_FOR_PIPEA_SVBLANK;
        else
            event = MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
    } else {
        if (full_height)
            event = MI_WAIT_FOR_PIPEB_SVBLANK;
        else
            event = MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW;
    }

    b = kgem_get_batch(&sna->kgem);
    sna->kgem.nbatch += 5;

    /* The documentation says that the LOAD_SCAN_LINES command
     * always comes in pairs. Don't ask me why. */
    b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
    b[3] = b[1] = (y1 << 16) | (y2-1);
    b[4] = MI_WAIT_FOR_EVENT | event;

    return true;
}
624
 
624
 
625
/* Gen2/gen3 scanline wait: no svblank event exists, so a full-height
 * update shrinks the window instead to give the blitter a head start.
 * Always emits the wait and returns true. */
static bool sna_emit_wait_for_scanline_gen2(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t *b;

    /*
     * Pre-965 doesn't have SVBLANK, so we need a bit
     * of extra time for the blitter to start up and
     * do its job for a full height blit
     */
    if (full_height)
        y2 -= 2;

    b = kgem_get_batch(&sna->kgem);
    sna->kgem.nbatch += 5;

    /* The documentation says that the LOAD_SCAN_LINES command
     * always comes in pairs. Don't ask me why. */
    b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
    b[3] = b[1] = (y1 << 16) | (y2-1);
    /* Scan-line-window event bit: 1 for pipe A, 5 for pipe B. */
    b[4] = MI_WAIT_FOR_EVENT | 1 << (1 + 4*pipe);

    return true;
}
651
 
651
 
652
/* Top-level vsync helper: translate @clip into a scanline window
 * relative to @crtc and dispatch to the per-generation emitter so the
 * following blit does not tear.
 * Returns true when a wait was emitted into the batch, false when no
 * wait is needed or the generation is unsupported. */
bool
sna_wait_for_scanline(struct sna *sna,
		      rect_t *crtc,
		      rect_t *clip)
{
	bool full_height;
	int y1, y2, pipe;
	bool ret;

//	if (sna->flags & SNA_NO_VSYNC)
//		return false;

	/*
	 * Make sure we don't wait for a scanline that will
	 * never occur
	 */
	y1 = clip->t - crtc->t;
    if (y1 < 1)
        y1 = 1;
	y2 = clip->b - crtc->t;
	if (y2 > crtc->b - crtc->t)
		y2 = crtc->b - crtc->t;
//	DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));
//	printf("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2);

	/* Window too small for the wait to pay off. */
	if (y2 <= y1 + 4)
		return false;

	/* NOTE(review): y1 is clamped to >= 1 above, so this condition can
	 * never hold here — confirm whether the clamp (upstream uses
	 * "if (y1 < 0) y1 = 0;") or this test is the intended behavior. */
	full_height = y1 == 0 && y2 == crtc->b - crtc->t;

    pipe = sna_fb.pipe;
	DBG(("%s: pipe=%d, y1=%d, y2=%d, full_height?=%d\n",
	     __FUNCTION__, pipe, y1, y2, full_height));

	/* Gen codes are octal: 0100 = gen8+ (no wait emitted), 075 = HSW,
	 * 070 = IVB, 060 = SNB, 040 = gen4; anything older takes gen2. */
	if (sna->kgem.gen >= 0100)
		ret = false;
    else if (sna->kgem.gen >= 075)
        ret = sna_emit_wait_for_scanline_hsw(sna, crtc, pipe, y1, y2, full_height);
    else if (sna->kgem.gen >= 070)
        ret = sna_emit_wait_for_scanline_ivb(sna, crtc, pipe, y1, y2, full_height);
	else if (sna->kgem.gen >= 060)
		ret =sna_emit_wait_for_scanline_gen6(sna, crtc, pipe, y1, y2, full_height);
    else if (sna->kgem.gen >= 040)
        ret = sna_emit_wait_for_scanline_gen4(sna, crtc, pipe, y1, y2, full_height);
    else
        ret = sna_emit_wait_for_scanline_gen2(sna, crtc, pipe, y1, y2, full_height);

	return ret;
}
701
 
701
 
702
 
702
 
703
 
703
 
704
 
704
 
705
 
705
 
706
 
706
 
707
 
707
 
-
 
708
 
-
 
709
int intel_get_device_id(struct sna *sna)
-
 
710
{
-
 
711
    struct drm_i915_getparam gp;
-
 
712
    int devid = 0;
-
 
713
 
-
 
714
    memset(&gp, 0, sizeof(gp));
-
 
715
    gp.param = I915_PARAM_CHIPSET_ID;
-
 
716
    gp.value = &devid;
-
 
717
 
-
 
718
    if (drmIoctl(sna->scrn, DRM_IOCTL_I915_GETPARAM, &gp))
-
 
719
        return 0;
-
 
720
    return devid;
708
 
721
}
709
 
722
 
710
/* Per-generation device descriptors.  The .gen values are octal so the
 * first digit is the major generation and the second the half-step
 * (e.g. 045 = G4X "gen4.5", 075 = Haswell "gen7.5"); -1 marks an
 * unrecognised device. */
static const struct intel_device_info intel_generic_info = {
	.gen = -1,
};

static const struct intel_device_info intel_i915_info = {
	.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
	.gen = 031,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 033,
};

static const struct intel_device_info intel_i965_info = {
	.gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
	.gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
	.gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
	.gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
	.gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
	.gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
	.gen = 075,
};
752
 
765
 
753
/* Build a pci_id_match entry for an Intel display device: vendor
 * 0x8086, the given device id, any subvendor/subdevice, display class
 * (0x3 << 16), with the matching intel_device_info stashed in
 * match_data. */
#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }


/* PCI id -> device info table, terminated by a zero entry.  The
 * INTEL_*_IDS macros expand to INTEL_VGA_DEVICE entries per chipset;
 * the catch-all PCI_MATCH_ANY entry maps unknown ids to the generic
 * (gen = -1) descriptor. */
static const struct pci_id_match intel_device_match[] = {

	INTEL_I915G_IDS(&intel_i915_info),
	INTEL_I915GM_IDS(&intel_i915_info),
	INTEL_I945G_IDS(&intel_i945_info),
	INTEL_I945GM_IDS(&intel_i945_info),

	INTEL_G33_IDS(&intel_g33_info),
	INTEL_PINEVIEW_IDS(&intel_g33_info),

	INTEL_I965G_IDS(&intel_i965_info),
	INTEL_I965GM_IDS(&intel_i965_info),

	INTEL_G45_IDS(&intel_g4x_info),
	INTEL_GM45_IDS(&intel_g4x_info),

	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),

	INTEL_SNB_D_IDS(&intel_sandybridge_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_info),

	INTEL_IVB_D_IDS(&intel_ivybridge_info),
	INTEL_IVB_M_IDS(&intel_ivybridge_info),

	INTEL_HSW_D_IDS(&intel_haswell_info),
	INTEL_HSW_M_IDS(&intel_haswell_info),

	INTEL_VLV_D_IDS(&intel_valleyview_info),
	INTEL_VLV_M_IDS(&intel_valleyview_info),

	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),

	{ 0, 0, 0 },
};
792
 
805
 
793
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
806
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
794
{
807
{
795
    while(list->device_id)
808
    while(list->device_id)
796
    {
809
    {
797
        if(dev==list->device_id)
810
        if(dev==list->device_id)
798
            return list;
811
            return list;
799
        list++;
812
        list++;
800
    }
813
    }
801
    return NULL;
814
    return NULL;
802
}
815
}
803
 
816
 
804
const struct intel_device_info *
817
const struct intel_device_info *
805
intel_detect_chipset(struct pci_device *pci)
818
intel_detect_chipset(struct pci_device *pci)
806
{
819
{
807
    const struct pci_id_match *ent = NULL;
820
    const struct pci_id_match *ent = NULL;
808
 
821
 
809
    ent = PciDevMatch(pci->device_id, intel_device_match);
822
    ent = PciDevMatch(pci->device_id, intel_device_match);
810
 
823
 
811
    if(ent != NULL)
824
    if(ent != NULL)
812
        return (const struct intel_device_info*)ent->match_data;
825
        return (const struct intel_device_info*)ent->match_data;
813
    else
826
    else
814
        return &intel_generic_info;
827
        return &intel_generic_info;
815
}
828
}
816
 
-
 
817
int intel_get_device_id(int fd)
-
 
818
{
-
 
819
	struct drm_i915_getparam gp;
-
 
820
	int devid = 0;
-
 
821
 
-
 
822
	memset(&gp, 0, sizeof(gp));
-
 
823
	gp.param = I915_PARAM_CHIPSET_ID;
-
 
824
	gp.value = &devid;
-
 
825
 
-
 
826
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
-
 
827
		return 0;
-
 
828
 
-
 
829
	return devid;
-
 
830
}
-
 
831
 
829
 
832
int drmIoctl(int fd, unsigned long request, void *arg)
830
int drmIoctl(int fd, unsigned long request, void *arg)
833
{
831
{
834
    ioctl_t  io;
832
    ioctl_t  io;
835
 
833
 
836
    io.handle   = fd;
834
    io.handle   = fd;
837
    io.io_code  = request;
835
    io.io_code  = request;
838
    io.input    = arg;
836
    io.input    = arg;
839
    io.inp_size = 64;
837
    io.inp_size = 64;
840
    io.output   = NULL;
838
    io.output   = NULL;
841
    io.out_size = 0;
839
    io.out_size = 0;
842
 
840
 
843
    return call_service(&io);
841
    return call_service(&io);
844
}
842
}
845
 
843
 
846
 
844
 
847
 
845
 
848
bool
846
bool
849
gen6_composite(struct sna *sna,
847
gen6_composite(struct sna *sna,
850
              uint8_t op,
848
              uint8_t op,
851
              PixmapPtr src, struct kgem_bo *src_bo,
849
              PixmapPtr src, struct kgem_bo *src_bo,
852
              PixmapPtr mask,struct kgem_bo *mask_bo,
850
              PixmapPtr mask,struct kgem_bo *mask_bo,
853
              PixmapPtr dst, struct kgem_bo *dst_bo,
851
              PixmapPtr dst, struct kgem_bo *dst_bo,
854
              int32_t src_x, int32_t src_y,
852
              int32_t src_x, int32_t src_y,
855
              int32_t msk_x, int32_t msk_y,
853
              int32_t msk_x, int32_t msk_y,
856
              int32_t dst_x, int32_t dst_y,
854
              int32_t dst_x, int32_t dst_y,
857
              int32_t width, int32_t height,
855
              int32_t width, int32_t height,
858
              struct sna_composite_op *tmp);
856
              struct sna_composite_op *tmp);
859
 
857
 
860
//#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
858
//#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
861
 
859
 
862
 
860
 
863
/* Import an existing GEM handle as a surface_t and attach it to
 * @bitmap (the surface pointer is stored through bitmap->handle).
 * The surface borrows @bitmap's width/height/flags; data stays NULL
 * because no CPU mapping is created here.
 *
 * Returns 0 on success, -1 on allocation or import failure (nothing
 * is leaked on the error paths). */
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    /* Fix: the import can fail; previously bo was dereferenced
     * unchecked and the err_2 cleanup label was unreachable. */
    if(bo == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
};
896
 
894
 
897
/* Rebind the GEM handle of the buffer object backing @bitmap's
 * surface. */
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);

    sf->bo->handle = handle;
}
903
 
901
 
904
/* Allocate a new 32bpp untiled bo sized to @bitmap, CPU-map it, and
 * publish it as a surface_t through bitmap->handle.
 * Returns 0 on success, -1 on failure; the goto chain unwinds the bo
 * and surface allocation so nothing leaks. */
static int sna_create_bitmap(bitmap_t *bitmap)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    /* kgem is shared between threads; serialize bo creation/mapping. */
    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32,I915_TILING_NONE, CREATE_CPU_MAP);

    if(bo == NULL)
        goto err_2;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    /* The opaque handle handed back to callers is the surface pointer. */
    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
};
946
 
944
 
947
/* Release the surface attached to @bitmap: destroy its bo under the
 * device lock, free the surface, and poison the bitmap fields so any
 * stale use is easy to spot.  Always returns 0. */
static int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);
    kgem_bo_destroy(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
965
 
963
 
966
/* Synchronize the surface's bo for CPU access and publish the mapped
 * pointer and pitch back through @bitmap.  Always returns 0. */
static int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

//    printf("%s\n", __FUNCTION__);
    __lock_acquire_recursive(__sna_lock);
    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
}
982
 
980
 
983
/* Resize @bitmap's surface to its new width/height.  If the existing
 * bo is large enough it is reused in place (only the pitch changes);
 * otherwise the bo is destroyed and a fresh one is allocated and
 * CPU-mapped.  bitmap->data/pitch are invalidated either way and must
 * be re-fetched via sna_lock_bitmap().
 * Returns 0 on success, -1 on allocation/mapping failure (the old bo
 * is already gone in that case). */
static int sna_resize_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem *kgem = &sna_device->kgem;
    struct kgem_bo *bo = sf->bo;

    uint32_t   size;
    uint32_t   pitch;

    /* Poison the cached mapping; callers must re-lock. */
    bitmap->pitch = -1;
    bitmap->data = (void *) -1;

    /* Compute the bo size/pitch the new dimensions require. */
    size = kgem_surface_size(kgem,kgem->has_relaxed_fencing, CREATE_CPU_MAP,
                 bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
    assert(size && size <= kgem->max_object_size);

    if(sf->bo_size >= size)
    {
        /* In-place resize: the bo is big enough, just adopt the new
         * geometry.  sf->data keeps pointing at the existing mapping. */
        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->pitch   = pitch;
        bo->pitch   = pitch;

        return 0;
    }
    else
    {
        __lock_acquire_recursive(__sna_lock);

        sna_bo_destroy(kgem, bo);

        sf->bo = NULL;

        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
                            32, I915_TILING_NONE, CREATE_CPU_MAP);

        if(bo == NULL)
        {
            __lock_release_recursive(__sna_lock);
            return -1;
        };

        void *map = kgem_bo_map(kgem, bo);
        if(map == NULL)
        {
            sna_bo_destroy(kgem, bo);
            __lock_release_recursive(__sna_lock);
            return -1;
        };

        __lock_release_recursive(__sna_lock);

        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->data    = map;
        sf->pitch   = bo->pitch;
        sf->bo      = bo;
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    }

    return 0;
};
1045
 
1043
 
1046
 
1044
 
1047
 
1045
 
1048
/* Allocate an 8bpp untiled bo covering the whole framebuffer
 * (sna_fb.width x sna_fb.height), zero it through its CPU mapping, and
 * stash it in the calling thread's TLS slot (tls_mask) for later use
 * as a clip/composite mask.
 * Returns 0 on success, -1 on allocation or mapping failure. */
int sna_create_mask()
{
    struct kgem_bo *bo;

//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
                        8,I915_TILING_NONE, CREATE_CPU_MAP);

    if(unlikely(bo == NULL))
        goto err_1;

    int *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    /* Clear the whole mask: pitch * height bytes. */
    memset(map, 0, bo->pitch * sna_fb.height);

    tls_set(tls_mask, bo);

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    __lock_release_recursive(__sna_lock);
    return -1;
};
1080
 
1078
 
1081
 
1079
 
1082
 
1080
 
1083
/*
 * Blit a bitmap surface onto the framebuffer through the render engine,
 * clipping the output with the per-window 8-bit mask so only the visible
 * part of the window is written.
 *
 * bitmap         source bitmap (wraps a surface_t with a GEM bo)
 * scale          non-zero to let the blitter scale the source
 * vsync          non-zero to wait for the scanline to pass the clip rect
 * dst_x/dst_y    destination inside the window
 * w/h            blit size
 * src_x/src_y    origin inside the source surface
 *
 * Returns 0 on success, -1 if the thread-local mask bo cannot be (re)built.
 */
int sna_blit_tex(bitmap_t *bitmap, int scale, int vsync,
                 int dst_x, int dst_y, int w, int h, int src_x, int src_y)
{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    /* Window origin from the process info block.
     * NOTE(review): offsets 34/38 assume the KolibriOS process-info
     * layout — confirm against the syscall documentation. */
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    /* Per-thread window mask bo; create lazily on first use. */
    mask_bo = tls_get(tls_mask);

    if (unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if (mask_bo == NULL)
            return -1;
    }

    /* The framebuffer mode changed: the old mask no longer matches the
     * screen, so destroy and rebuild it. */
    if (kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if (mask_bo == NULL)
            return -1;
    }

    /* Ask the kernel to refresh the mask contents for this window. */
    VG_CLEAR(update);
    update.handle = mask_bo->handle;
    update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask)); /* was sizeof(dst): copy-paste slip */

    src.drawable.bitsPerPixel = 32;

    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);

    if (vsync)
    {
        rect_t crtc, clip;

        crtc.l = 0;
        crtc.t = 0;
        crtc.r = sna_fb.width-1;
        crtc.b = sna_fb.height-1;

        clip.l = winx+dst_x;
        clip.t = winy+dst_y;
        clip.r = clip.l+w-1;
        clip.b = clip.t+h-1;

        kgem_set_mode(&sna_device->kgem, KGEM_RENDER, sna_fb.fb_bo);
        sna_wait_for_scanline(sna_device, &crtc, &clip);
    }

    /* blit_tex prepares the composite op; on success emit exactly one
     * rectangle and retire the op. */
    if (sna_device->render.blit_tex(sna_device, PictOpSrc, scale,
              &src, src_bo,
              &mask, mask_bo,
              &dst, sna_fb.fb_bo,
              src_x, src_y,
              dst_x, dst_y,
              winx+dst_x, winy+dst_y,
              w, h,
              &composite))
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    }

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    /* Poison the CPU view: the bitmap now lives on the GPU. */
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
1209
 
1207
 
1210
 
1208
 
1211
static void sna_fini()
1209
static void sna_fini()
1212
{
1210
{
1213
    if( sna_device )
1211
    if( sna_device )
1214
    {
1212
    {
1215
        struct kgem_bo *mask;
1213
        struct kgem_bo *mask;
1216
 
1214
 
1217
        __lock_acquire_recursive(__sna_lock);
1215
        __lock_acquire_recursive(__sna_lock);
1218
 
1216
 
1219
        mask = tls_get(tls_mask);
1217
        mask = tls_get(tls_mask);
1220
 
1218
 
1221
        sna_device->render.fini(sna_device);
1219
        sna_device->render.fini(sna_device);
1222
        if(mask)
1220
        if(mask)
1223
            kgem_bo_destroy(&sna_device->kgem, mask);
1221
            kgem_bo_destroy(&sna_device->kgem, mask);
1224
//        kgem_close_batches(&sna_device->kgem);
1222
//        kgem_close_batches(&sna_device->kgem);
1225
        kgem_cleanup_cache(&sna_device->kgem);
1223
        kgem_cleanup_cache(&sna_device->kgem);
1226
 
1224
 
1227
        sna_device = NULL;
1225
        sna_device = NULL;
1228
        __lock_release_recursive(__sna_lock);
1226
        __lock_release_recursive(__sna_lock);
1229
    };
1227
    };
1230
}
1228
}
1231
 
1229
 
1232
uint32_t DrvInit(uint32_t service, struct pix_driver *driver)
1230
uint32_t DrvInit(uint32_t service, struct pix_driver *driver)
1233
{
1231
{
1234
    ioctl_t   io;
1232
    ioctl_t   io;
1235
    int caps = 0;
1233
    int caps = 0;
1236
 
1234
 
1237
    static struct pci_device device;
1235
    static struct pci_device device;
1238
    struct sna *sna;
1236
    struct sna *sna;
1239
 
1237
 
1240
    DBG(("%s\n", __FUNCTION__));
1238
    DBG(("%s\n", __FUNCTION__));
1241
 
1239
 
1242
    __lock_acquire_recursive(__sna_lock);
1240
    __lock_acquire_recursive(__sna_lock);
1243
 
1241
 
1244
    if(sna_device)
1242
    if(sna_device)
1245
        goto done;
1243
        goto done;
1246
 
1244
 
1247
    io.handle   = service;
1245
    io.handle   = service;
1248
    io.io_code  = SRV_GET_PCI_INFO;
1246
    io.io_code  = SRV_GET_PCI_INFO;
1249
    io.input    = &device;
1247
    io.input    = &device;
1250
    io.inp_size = sizeof(device);
1248
    io.inp_size = sizeof(device);
1251
    io.output   = NULL;
1249
    io.output   = NULL;
1252
    io.out_size = 0;
1250
    io.out_size = 0;
1253
 
1251
 
1254
    if (call_service(&io)!=0)
1252
    if (call_service(&io)!=0)
1255
        goto err1;
1253
        goto err1;
1256
 
1254
 
1257
    sna = malloc(sizeof(*sna));
1255
    sna = malloc(sizeof(*sna));
1258
    if (sna == NULL)
1256
    if (sna == NULL)
1259
        goto err1;
1257
        goto err1;
1260
 
1258
 
1261
    memset(sna, 0, sizeof(*sna));
1259
    memset(sna, 0, sizeof(*sna));
1262
 
1260
 
1263
    sna->cpu_features = sna_cpu_detect();
1261
    sna->cpu_features = sna_cpu_detect();
1264
 
1262
 
1265
    sna->PciInfo = &device;
1263
    sna->PciInfo = &device;
1266
    sna->info = intel_detect_chipset(sna->PciInfo);
1264
    sna->info = intel_detect_chipset(sna->PciInfo);
1267
    sna->scrn = service;
1265
    sna->scrn = service;
1268
 
1266
 
1269
    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
1267
    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
1270
 
1268
 
1271
    /* Disable tiling by default */
1269
    /* Disable tiling by default */
1272
    sna->tiling = 0;
1270
    sna->tiling = 0;
1273
 
1271
 
1274
    /* Default fail-safe value of 75 Hz */
1272
    /* Default fail-safe value of 75 Hz */
1275
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;
1273
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;
1276
 
1274
 
1277
    sna->flags = 0;
1275
    sna->flags = 0;
1278
 
1276
 
1279
    sna_accel_init(sna);
1277
    sna_accel_init(sna);
1280
 
1278
 
1281
    tls_mask = tls_alloc();
1279
    tls_mask = tls_alloc();
1282
 
1280
 
1283
//    printf("tls mask %x\n", tls_mask);
1281
//    printf("tls mask %x\n", tls_mask);
1284
 
1282
 
1285
    driver->create_bitmap  = sna_create_bitmap;
1283
    driver->create_bitmap  = sna_create_bitmap;
1286
    driver->destroy_bitmap = sna_destroy_bitmap;
1284
    driver->destroy_bitmap = sna_destroy_bitmap;
1287
    driver->lock_bitmap    = sna_lock_bitmap;
1285
    driver->lock_bitmap    = sna_lock_bitmap;
1288
    driver->blit           = sna_blit_tex;
1286
    driver->blit           = sna_blit_tex;
1289
    driver->resize_bitmap  = sna_resize_bitmap;
1287
    driver->resize_bitmap  = sna_resize_bitmap;
1290
    driver->fini           = sna_fini;
1288
    driver->fini           = sna_fini;
1291
done:
1289
done:
1292
    caps = sna_device->render.caps;
1290
    caps = sna_device->render.caps;
1293
 
1291
 
1294
err1:
1292
err1:
1295
    __lock_release_recursive(__sna_lock);
1293
    __lock_release_recursive(__sna_lock);
1296
 
1294
 
1297
    return caps;
1295
    return caps;
1298
}
1296
}
1299
 
1297
 
1300
static>
1298
static>
1301
 
1299
 
1302
static>
1300
static>
1303
#define>
1301
#define>
1304
#define>
1302
#define>