Subversion Repositories Kolibri OS

Rev

Rev 4359 | Rev 4372 | Go to most recent revision | Only display areas with differences | Regard whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 4359 Rev 4368
1
/**************************************************************************
1
/**************************************************************************
2
 
2
 
3
Copyright 2001 VA Linux Systems Inc., Fremont, California.
3
Copyright 2001 VA Linux Systems Inc., Fremont, California.
4
Copyright © 2002 by David Dawes
4
Copyright © 2002 by David Dawes
5
 
5
 
6
All Rights Reserved.
6
All Rights Reserved.
7
 
7
 
8
Permission is hereby granted, free of charge, to any person obtaining a
8
Permission is hereby granted, free of charge, to any person obtaining a
9
copy of this software and associated documentation files (the "Software"),
9
copy of this software and associated documentation files (the "Software"),
10
to deal in the Software without restriction, including without limitation
10
to deal in the Software without restriction, including without limitation
11
on the rights to use, copy, modify, merge, publish, distribute, sub
11
on the rights to use, copy, modify, merge, publish, distribute, sub
12
license, and/or sell copies of the Software, and to permit persons to whom
12
license, and/or sell copies of the Software, and to permit persons to whom
13
the Software is furnished to do so, subject to the following conditions:
13
the Software is furnished to do so, subject to the following conditions:
14
 
14
 
15
The above copyright notice and this permission notice (including the next
15
The above copyright notice and this permission notice (including the next
16
paragraph) shall be included in all copies or substantial portions of the
16
paragraph) shall be included in all copies or substantial portions of the
17
Software.
17
Software.
18
 
18
 
19
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
23
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25
USE OR OTHER DEALINGS IN THE SOFTWARE.
25
USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 
26
 
27
**************************************************************************/
27
**************************************************************************/
28
 
28
 
29
/*
29
/*
30
 * Authors: Jeff Hartmann 
30
 * Authors: Jeff Hartmann 
31
 *          Abraham van der Merwe 
31
 *          Abraham van der Merwe 
32
 *          David Dawes 
32
 *          David Dawes 
33
 *          Alan Hourihane 
33
 *          Alan Hourihane 
34
 */
34
 */
35
 
35
 
36
#ifdef HAVE_CONFIG_H
36
#ifdef HAVE_CONFIG_H
37
#include "config.h"
37
#include "config.h"
38
#endif
38
#endif
39
 
39
 
40
#include 
40
#include 
41
#include 
41
#include 
42
#include "i915_pciids.h"
42
#include "i915_pciids.h"
43
 
43
 
44
#include "compiler.h"
44
#include "compiler.h"
45
#include "sna.h"
45
#include "sna.h"
46
 
46
 
47
#include 
47
#include 
48
#include 
48
#include 
49
 
49
 
50
/* Recover the surface_t hidden behind a bitmap's opaque handle.
 * Fully parenthesized so the result can be used in any expression
 * context without precedence surprises. */
#define to_surface(x) ((surface_t *)((x)->handle))
-
 
51
 
-
 
52
/* Axis-aligned rectangle described by its four edges. */
typedef struct {
    int l;  /* left   */
    int t;  /* top    */
    int r;  /* right  */
    int b;  /* bottom */
} rect_t;
51
 
58
 
52
/* Framebuffer description handed to kgem_init_fb() in sna_accel_init(). */
static struct sna_fb sna_fb;

/* TLS key obtained from tls_alloc(); sna_fini() reads a per-thread
 * mask bo out of this slot. */
static int    tls_mask;

/* Allocates a fresh TLS slot; implemented elsewhere. */
int tls_alloc(void);
56
 
63
 
57
/*
 * Load the pointer stored in thread-local slot 'key' (an offset into
 * the %fs-based TLS block on this 32-bit target).
 */
static inline void *tls_get(int key)
{
    void *val;

    __asm__ __volatile__(
        "movl %%fs:(%1), %0"
        : "=r"(val)
        : "r"(key));

    return val;
}
67
 
74
 
68
/*
 * Store 'ptr' into thread-local slot 'key'.
 * Returns 0 on success, -1 when the key is not 4-byte aligned.
 */
static inline int
tls_set(int key, const void *ptr)
{
    if (key & 3)
        return -1;

    __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        :: "r"(ptr), "r"(key));
    return 0;
}
80
 
87
 
81
 
88
 
82
 
89
 
83
 
90
 
84
/* kgem helpers implemented elsewhere in the driver. */
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
uint32_t kgem_surface_size(struct kgem *kgem, bool relaxed_fencing,
                           unsigned flags, uint32_t width, uint32_t height,
                           uint32_t bpp, uint32_t tiling, uint32_t *pitch);
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
                                    int pitch, int height);

void kgem_close_batches(struct kgem *kgem);
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
94
 
101
 
95
 
102
 
96
static bool sna_solid_cache_init(struct sna *sna);
103
static bool sna_solid_cache_init(struct sna *sna);
97
 
104
 
98
struct sna *sna_device;
105
struct sna *sna_device;
99
 
106
 
100
__LOCK_INIT_RECURSIVE(, __sna_lock);
107
__LOCK_INIT_RECURSIVE(, __sna_lock);
101
 
108
 
102
/* "No render" backend: reset hook is a deliberate no-op. */
static void no_render_reset(struct sna *sna)
{
	(void)sna;
}
106
 
113
 
107
/* "No render" backend: flush hook is a deliberate no-op. */
static void no_render_flush(struct sna *sna)
{
	(void)sna;
}
111
 
118
 
112
static void
119
static void
113
no_render_context_switch(struct kgem *kgem,
120
no_render_context_switch(struct kgem *kgem,
114
			 int new_mode)
121
			 int new_mode)
115
{
122
{
116
	if (!kgem->nbatch)
123
	if (!kgem->nbatch)
117
		return;
124
		return;
118
 
125
 
119
	if (kgem_ring_is_idle(kgem, kgem->ring)) {
126
	if (kgem_ring_is_idle(kgem, kgem->ring)) {
120
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
127
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
121
		_kgem_submit(kgem);
128
		_kgem_submit(kgem);
122
	}
129
	}
123
 
130
 
124
	(void)new_mode;
131
	(void)new_mode;
125
}
132
}
126
 
133
 
127
/* "No render" backend: retire hook is a deliberate no-op. */
static void
no_render_retire(struct kgem *kgem)
{
	(void)kgem;
}
132
 
139
 
133
/* "No render" backend: expire hook is a deliberate no-op. */
static void
no_render_expire(struct kgem *kgem)
{
	(void)kgem;
}
138
 
145
 
139
/* "No render" backend: fini hook is a deliberate no-op. */
static void
no_render_fini(struct sna *sna)
{
	(void)sna;
}
144
 
151
 
145
const char *no_render_init(struct sna *sna)
152
const char *no_render_init(struct sna *sna)
146
{
153
{
147
    struct sna_render *render = &sna->render;
154
    struct sna_render *render = &sna->render;
148
 
155
 
149
    memset (render,0, sizeof (*render));
156
    memset (render,0, sizeof (*render));
150
 
157
 
151
    render->prefer_gpu = PREFER_GPU_BLT;
158
    render->prefer_gpu = PREFER_GPU_BLT;
152
 
159
 
153
    render->vertices = render->vertex_data;
160
    render->vertices = render->vertex_data;
154
    render->vertex_size = ARRAY_SIZE(render->vertex_data);
161
    render->vertex_size = ARRAY_SIZE(render->vertex_data);
155
 
162
 
156
    render->reset = no_render_reset;
163
    render->reset = no_render_reset;
157
	render->flush = no_render_flush;
164
	render->flush = no_render_flush;
158
	render->fini = no_render_fini;
165
	render->fini = no_render_fini;
159
 
166
 
160
	sna->kgem.context_switch = no_render_context_switch;
167
	sna->kgem.context_switch = no_render_context_switch;
161
	sna->kgem.retire = no_render_retire;
168
	sna->kgem.retire = no_render_retire;
162
	sna->kgem.expire = no_render_expire;
169
	sna->kgem.expire = no_render_expire;
163
 
170
 
164
	sna->kgem.mode = KGEM_RENDER;
171
	sna->kgem.mode = KGEM_RENDER;
165
	sna->kgem.ring = KGEM_RENDER;
172
	sna->kgem.ring = KGEM_RENDER;
166
 
173
 
167
	sna_vertex_init(sna);
174
	sna_vertex_init(sna);
168
	return "generic";
175
	return "generic";
169
 }
176
 }
170
 
177
 
171
void sna_vertex_init(struct sna *sna)
178
void sna_vertex_init(struct sna *sna)
172
{
179
{
173
//    pthread_mutex_init(&sna->render.lock, NULL);
180
//    pthread_mutex_init(&sna->render.lock, NULL);
174
//    pthread_cond_init(&sna->render.wait, NULL);
181
//    pthread_cond_init(&sna->render.wait, NULL);
175
    sna->render.active = 0;
182
    sna->render.active = 0;
176
}
183
}
177
 
184
 
178
int sna_accel_init(struct sna *sna)
185
int sna_accel_init(struct sna *sna)
179
{
186
{
180
    const char *backend;
187
    const char *backend;
181
 
188
 
182
	backend = no_render_init(sna);
189
	backend = no_render_init(sna);
183
	if (sna->info->gen >= 0100)
190
	if (sna->info->gen >= 0100)
184
		(void)backend;
191
		(void)backend;
185
	else if (sna->info->gen >= 070)
192
	else if (sna->info->gen >= 070)
186
		backend = gen7_render_init(sna, backend);
193
		backend = gen7_render_init(sna, backend);
187
	else if (sna->info->gen >= 060)
194
	else if (sna->info->gen >= 060)
188
		backend = gen6_render_init(sna, backend);
195
		backend = gen6_render_init(sna, backend);
189
	else if (sna->info->gen >= 050)
196
	else if (sna->info->gen >= 050)
190
		backend = gen5_render_init(sna, backend);
197
		backend = gen5_render_init(sna, backend);
191
	else if (sna->info->gen >= 040)
198
	else if (sna->info->gen >= 040)
192
		backend = gen4_render_init(sna, backend);
199
		backend = gen4_render_init(sna, backend);
193
	else if (sna->info->gen >= 030)
200
	else if (sna->info->gen >= 030)
194
		backend = gen3_render_init(sna, backend);
201
		backend = gen3_render_init(sna, backend);
195
 
202
 
196
	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
203
	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
197
	     __FUNCTION__, backend, sna->render.prefer_gpu));
204
	     __FUNCTION__, backend, sna->render.prefer_gpu));
198
 
205
 
199
	kgem_reset(&sna->kgem);
206
	kgem_reset(&sna->kgem);
200
 
207
 
201
    sna_device = sna;
208
    sna_device = sna;
202
 
209
 
203
    return kgem_init_fb(&sna->kgem, &sna_fb);
210
    return kgem_init_fb(&sna->kgem, &sna_fb);
204
}
211
}
205
 
212
 
206
int sna_init(uint32_t service)
213
int sna_init(uint32_t service)
207
{
214
{
208
    ioctl_t   io;
215
    ioctl_t   io;
209
    int caps = 0;
216
    int caps = 0;
210
 
217
 
211
    static struct pci_device device;
218
    static struct pci_device device;
212
    struct sna *sna;
219
    struct sna *sna;
213
 
220
 
214
    DBG(("%s\n", __FUNCTION__));
221
    DBG(("%s\n", __FUNCTION__));
215
 
222
 
216
    __lock_acquire_recursive(__sna_lock);
223
    __lock_acquire_recursive(__sna_lock);
217
 
224
 
218
    if(sna_device)
225
    if(sna_device)
219
        goto done;
226
        goto done;
220
 
227
 
221
    io.handle   = service;
228
    io.handle   = service;
222
    io.io_code  = SRV_GET_PCI_INFO;
229
    io.io_code  = SRV_GET_PCI_INFO;
223
    io.input    = &device;
230
    io.input    = &device;
224
    io.inp_size = sizeof(device);
231
    io.inp_size = sizeof(device);
225
    io.output   = NULL;
232
    io.output   = NULL;
226
    io.out_size = 0;
233
    io.out_size = 0;
227
 
234
 
228
    if (call_service(&io)!=0)
235
    if (call_service(&io)!=0)
229
        goto err1;
236
        goto err1;
230
 
237
 
231
    sna = malloc(sizeof(*sna));
238
    sna = malloc(sizeof(*sna));
232
    if (sna == NULL)
239
    if (sna == NULL)
233
        goto err1;
240
        goto err1;
234
 
241
 
235
    memset(sna, 0, sizeof(*sna));
242
    memset(sna, 0, sizeof(*sna));
236
 
243
 
237
    sna->cpu_features = sna_cpu_detect();
244
    sna->cpu_features = sna_cpu_detect();
238
 
245
 
239
    sna->PciInfo = &device;
246
    sna->PciInfo = &device;
240
  	sna->info = intel_detect_chipset(sna->PciInfo);
247
  	sna->info = intel_detect_chipset(sna->PciInfo);
241
    sna->scrn = service;
248
    sna->scrn = service;
242
 
249
 
243
    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
250
    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
244
 
251
 
245
 
252
 
246
    /* Disable tiling by default */
253
    /* Disable tiling by default */
247
    sna->tiling = 0;
254
    sna->tiling = 0;
248
 
255
 
249
    /* Default fail-safe value of 75 Hz */
256
    /* Default fail-safe value of 75 Hz */
250
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;
257
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;
251
 
258
 
252
    sna->flags = 0;
259
    sna->flags = 0;
253
 
260
 
254
    sna_accel_init(sna);
261
    sna_accel_init(sna);
255
 
262
 
256
    tls_mask = tls_alloc();
263
    tls_mask = tls_alloc();
257
 
264
 
258
//    printf("tls mask %x\n", tls_mask);
265
//    printf("tls mask %x\n", tls_mask);
259
 
266
 
260
done:
267
done:
261
    caps = sna_device->render.caps;
268
    caps = sna_device->render.caps;
262
 
269
 
263
err1:
270
err1:
264
    __lock_release_recursive(__sna_lock);
271
    __lock_release_recursive(__sna_lock);
265
 
272
 
266
    return caps;
273
    return caps;
267
}
274
}
268
 
275
 
269
void sna_fini()
276
void sna_fini()
270
{
277
{
-
 
278
    ENTER();
-
 
279
 
271
    if( sna_device )
280
    if( sna_device )
272
    {
281
    {
273
        struct kgem_bo *mask;
282
        struct kgem_bo *mask;
274
 
283
 
275
        __lock_acquire_recursive(__sna_lock);
284
        __lock_acquire_recursive(__sna_lock);
276
 
285
 
277
        mask = tls_get(tls_mask);
286
        mask = tls_get(tls_mask);
278
 
287
 
279
        sna_device->render.fini(sna_device);
288
        sna_device->render.fini(sna_device);
280
        if(mask)
289
        if(mask)
281
            kgem_bo_destroy(&sna_device->kgem, mask);
290
            kgem_bo_destroy(&sna_device->kgem, mask);
282
        kgem_close_batches(&sna_device->kgem);
291
//        kgem_close_batches(&sna_device->kgem);
283
   	    kgem_cleanup_cache(&sna_device->kgem);
292
   	    kgem_cleanup_cache(&sna_device->kgem);
284
 
293
 
285
   	    sna_device = NULL;
294
   	    sna_device = NULL;
286
        __lock_release_recursive(__sna_lock);
295
        __lock_release_recursive(__sna_lock);
287
    };
296
    };
-
 
297
    LEAVE();
288
}
298
}
289
 
299
 
290
#if 0

/*
 * Solid-colour bo cache (currently compiled out).
 * Creates a linear bo holding up to ARRAY_SIZE(cache->color) pixels and
 * hands out 1x1 proxy bos into it.
 */
static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

/* Write the dirty colour table into the backing linear bo. */
void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
                  cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

/* Drop all cached proxies and recreate the backing bo.  Only acts when
 * forced or when the cache bo has migrated to the GPU domain. */
static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}

/* Return a referenced 1x1 bo containing 'color', reusing a cached slot
 * when possible and recycling the cache when it fills up. */
struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    /* Cache miss: recycle if full, then claim a new slot. */
    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif
420
 
430
 
421
 
431
 
422
/*
 * Blit a w x h rectangle from src_bitmap at (src_x,src_y) onto the
 * screen framebuffer at window-relative (dst_x,dst_y).  The window
 * origin is read from the process info block (byte offsets 34/38).
 * Always returns 0.
 *
 * NOTE: the scraped source had the "&copy" tokens eaten by an HTML
 * entity ("©"); restored below — without them this does not compile.
 */
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    /* Window origin in screen coordinates. */
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if (sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy))
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;
}
468
 
478
 
469
/* Private per-bitmap state stored behind bitmap_t::handle. */
typedef struct
{
    uint32_t        width;    /* pixels */
    uint32_t        height;   /* pixels */
    void           *data;     /* CPU mapping, or NULL when unmapped */
    uint32_t        pitch;    /* bytes per row, taken from the bo */
    struct kgem_bo *bo;       /* backing buffer object */
    uint32_t        bo_size;  /* bo size in bytes (pages * PAGE_SIZE) */
    uint32_t        flags;    /* copied from bitmap_t::flags */
} surface_t;
479
 
489
 
480
 
490
 
481
 
491
 
482
/*
 * Allocate a CPU-mappable GEM-backed bitmap.  On success stores an
 * opaque surface_t pointer in bitmap->handle and returns 0; returns -1
 * on any failure (all partial state is rolled back).
 */
int sna_create_bitmap(bitmap_t *bitmap)
{
    surface_t *sf;
    struct kgem_bo *bo;
    void *map;

    sf = malloc(sizeof(*sf));
    if (sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        goto err_2;

    map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    /* Pointer fits the 32-bit handle on this 32-bit target. */
    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}
524
 
534
 
525
/*
 * Wrap an existing GEM handle in a surface_t.  The resulting surface
 * has no CPU mapping (sf->data == NULL).  Returns 0 on success, -1 on
 * failure.
 *
 * Fix: the original never checked kgem_bo_from_handle() for NULL — it
 * dereferenced bo->pitch unconditionally and the err_2 label was
 * unreachable dead code.
 */
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if (sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    if (bo == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}
558
 
568
 
559
/* Overwrite the GEM handle of the bo backing 'bitmap'. */
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);

    sf->bo->handle = handle;
}
565
 
575
 
566
/*
 * Release the surface behind 'bitmap': destroy its bo under the lock,
 * free the surface_t and poison the bitmap fields so stale use is easy
 * to spot.  Returns 0.
 */
int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);
    kgem_bo_destroy(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
584
 
594
 
585
int sna_lock_bitmap(bitmap_t *bitmap)
595
int sna_lock_bitmap(bitmap_t *bitmap)
586
{
596
{
587
    surface_t *sf = to_surface(bitmap);
597
    surface_t *sf = to_surface(bitmap);
588
 
598
 
589
//    printf("%s\n", __FUNCTION__);
599
//    printf("%s\n", __FUNCTION__);
590
    __lock_acquire_recursive(__sna_lock);
600
    __lock_acquire_recursive(__sna_lock);
591
 
601
 
592
    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);
602
    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);
593
 
603
 
594
    __lock_release_recursive(__sna_lock);
604
    __lock_release_recursive(__sna_lock);
595
 
605
 
596
    bitmap->data  = sf->data;
606
    bitmap->data  = sf->data;
597
    bitmap->pitch = sf->pitch;
607
    bitmap->pitch = sf->pitch;
598
 
608
 
599
    return 0;
609
    return 0;
600
};
610
};
601
 
611
 
602
int sna_resize_bitmap(bitmap_t *bitmap)
612
int sna_resize_bitmap(bitmap_t *bitmap)
603
{
613
{
604
    surface_t *sf = to_surface(bitmap);
614
    surface_t *sf = to_surface(bitmap);
605
    struct kgem *kgem = &sna_device->kgem;
615
    struct kgem *kgem = &sna_device->kgem;
606
    struct kgem_bo *bo = sf->bo;
616
    struct kgem_bo *bo = sf->bo;
607
 
617
 
608
    uint32_t   size;
618
    uint32_t   size;
609
    uint32_t   pitch;
619
    uint32_t   pitch;
610
 
620
 
611
   	bitmap->pitch = -1;
621
   	bitmap->pitch = -1;
612
    bitmap->data = (void *) -1;
622
    bitmap->data = (void *) -1;
613
 
623
 
614
	size = kgem_surface_size(kgem,kgem->has_relaxed_fencing, CREATE_CPU_MAP,
624
	size = kgem_surface_size(kgem,kgem->has_relaxed_fencing, CREATE_CPU_MAP,
615
				 bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
625
				 bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
616
	assert(size && size <= kgem->max_object_size);
626
	assert(size && size <= kgem->max_object_size);
617
 
627
 
618
    if(sf->bo_size >= size)
628
    if(sf->bo_size >= size)
619
    {
629
    {
620
        sf->width   = bitmap->width;
630
        sf->width   = bitmap->width;
621
        sf->height  = bitmap->height;
631
        sf->height  = bitmap->height;
622
        sf->pitch   = pitch;
632
        sf->pitch   = pitch;
623
        bo->pitch   = pitch;
633
        bo->pitch   = pitch;
624
 
634
 
625
	    return 0;
635
	    return 0;
626
    }
636
    }
627
    else
637
    else
628
    {
638
    {
629
        __lock_acquire_recursive(__sna_lock);
639
        __lock_acquire_recursive(__sna_lock);
630
 
640
 
631
        sna_bo_destroy(kgem, bo);
641
        sna_bo_destroy(kgem, bo);
632
 
642
 
633
        sf->bo = NULL;
643
        sf->bo = NULL;
634
 
644
 
635
        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
645
        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
636
                            32, I915_TILING_NONE, CREATE_CPU_MAP);
646
                            32, I915_TILING_NONE, CREATE_CPU_MAP);
637
 
647
 
638
        if(bo == NULL)
648
        if(bo == NULL)
639
        {
649
        {
640
            __lock_release_recursive(__sna_lock);
650
            __lock_release_recursive(__sna_lock);
641
            return -1;
651
            return -1;
642
        };
652
        };
643
 
653
 
644
        void *map = kgem_bo_map(kgem, bo);
654
        void *map = kgem_bo_map(kgem, bo);
645
        if(map == NULL)
655
        if(map == NULL)
646
        {
656
        {
647
            sna_bo_destroy(kgem, bo);
657
            sna_bo_destroy(kgem, bo);
648
            __lock_release_recursive(__sna_lock);
658
            __lock_release_recursive(__sna_lock);
649
            return -1;
659
            return -1;
650
        };
660
        };
651
 
661
 
652
        __lock_release_recursive(__sna_lock);
662
        __lock_release_recursive(__sna_lock);
653
 
663
 
654
        sf->width   = bitmap->width;
664
        sf->width   = bitmap->width;
655
        sf->height  = bitmap->height;
665
        sf->height  = bitmap->height;
656
        sf->data    = map;
666
        sf->data    = map;
657
        sf->pitch   = bo->pitch;
667
        sf->pitch   = bo->pitch;
658
        sf->bo      = bo;
668
        sf->bo      = bo;
659
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
669
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
660
    }
670
    }
661
 
671
 
662
    return 0;
672
    return 0;
663
};
673
};
664
 
674
 
665
 
675
 
666
 
676
 
667
int sna_create_mask()
677
int sna_create_mask()
668
{
678
{
669
	struct kgem_bo *bo;
679
	struct kgem_bo *bo;
670
 
680
 
671
//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);
681
//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);
672
 
682
 
673
    __lock_acquire_recursive(__sna_lock);
683
    __lock_acquire_recursive(__sna_lock);
674
 
684
 
675
    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
685
    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
676
                        8,I915_TILING_NONE, CREATE_CPU_MAP);
686
                        8,I915_TILING_NONE, CREATE_CPU_MAP);
677
 
687
 
678
    if(unlikely(bo == NULL))
688
    if(unlikely(bo == NULL))
679
        goto err_1;
689
        goto err_1;
680
 
690
 
681
    int *map = kgem_bo_map(&sna_device->kgem, bo);
691
    int *map = kgem_bo_map(&sna_device->kgem, bo);
682
    if(map == NULL)
692
    if(map == NULL)
683
        goto err_2;
693
        goto err_2;
684
 
694
 
685
    __lock_release_recursive(__sna_lock);
695
    __lock_release_recursive(__sna_lock);
686
 
696
 
687
    memset(map, 0, bo->pitch * sna_fb.height);
697
    memset(map, 0, bo->pitch * sna_fb.height);
688
 
698
 
689
    tls_set(tls_mask, bo);
699
    tls_set(tls_mask, bo);
690
 
700
 
691
    return 0;
701
    return 0;
692
 
702
 
693
err_2:
703
err_2:
694
    kgem_bo_destroy(&sna_device->kgem, bo);
704
    kgem_bo_destroy(&sna_device->kgem, bo);
695
err_1:
705
err_1:
696
    __lock_release_recursive(__sna_lock);
706
    __lock_release_recursive(__sna_lock);
697
    return -1;
707
    return -1;
698
};
708
};
-
 
709
 
-
 
710
#define MI_LOAD_REGISTER_IMM		(0x22<<23)
-
 
711
#define MI_WAIT_FOR_EVENT			(0x03<<23)
-
 
712
 
-
 
713
static bool sna_emit_wait_for_scanline_gen6(struct sna *sna,
-
 
714
                        rect_t *crtc,
-
 
715
					    int pipe, int y1, int y2,
-
 
716
					    bool full_height)
-
 
717
{
-
 
718
	uint32_t *b;
-
 
719
	uint32_t event;
-
 
720
 
-
 
721
//	if (!sna->kgem.has_secure_batches)
-
 
722
//		return false;
-
 
723
 
-
 
724
	assert(y1 >= 0);
-
 
725
	assert(y2 > y1);
-
 
726
	assert(sna->kgem.mode == KGEM_RENDER);
-
 
727
 
-
 
728
	/* Always program one less than the desired value */
-
 
729
	if (--y1 < 0)
-
 
730
		y1 = crtc->b;
-
 
731
	y2--;
-
 
732
 
-
 
733
	/* The scanline granularity is 3 bits */
-
 
734
	y1 &= ~7;
-
 
735
	y2 &= ~7;
-
 
736
	if (y2 == y1)
-
 
737
		return false;
-
 
738
 
-
 
739
	event = 1 << (3*full_height + pipe*8);
-
 
740
 
-
 
741
	b = kgem_get_batch(&sna->kgem);
-
 
742
	sna->kgem.nbatch += 10;
-
 
743
 
-
 
744
	b[0] = MI_LOAD_REGISTER_IMM | 1;
-
 
745
	b[1] = 0x44050; /* DERRMR */
-
 
746
	b[2] = ~event;
-
 
747
	b[3] = MI_LOAD_REGISTER_IMM | 1;
-
 
748
	b[4] = 0x4f100; /* magic */
-
 
749
	b[5] = (1 << 31) | (1 << 30) | pipe << 29 | (y1 << 16) | y2;
-
 
750
	b[6] = MI_WAIT_FOR_EVENT | event;
-
 
751
	b[7] = MI_LOAD_REGISTER_IMM | 1;
-
 
752
	b[8] = 0x44050; /* DERRMR */
-
 
753
	b[9] = ~0;
-
 
754
 
-
 
755
	sna->kgem.batch_flags |= I915_EXEC_SECURE;
-
 
756
 
-
 
757
	return true;
-
 
758
}
-
 
759
 
-
 
760
bool
-
 
761
sna_wait_for_scanline(struct sna *sna,
-
 
762
		      rect_t *crtc,
-
 
763
		      rect_t *clip)
-
 
764
{
-
 
765
	bool full_height;
-
 
766
	int y1, y2, pipe;
-
 
767
	bool ret;
-
 
768
 
-
 
769
//	if (sna->flags & SNA_NO_VSYNC)
-
 
770
//		return false;
-
 
771
 
-
 
772
	/*
-
 
773
	 * Make sure we don't wait for a scanline that will
-
 
774
	 * never occur
-
 
775
	 */
-
 
776
	y1 = clip->t - crtc->t;
-
 
777
	if (y1 < 0)
-
 
778
		y1 = 0;
-
 
779
	y2 = clip->b - crtc->t;
-
 
780
	if (y2 > crtc->b - crtc->t)
-
 
781
		y2 = crtc->b - crtc->t;
-
 
782
//	DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));
-
 
783
//	printf("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2);
-
 
784
 
-
 
785
	if (y2 <= y1 + 4)
-
 
786
		return false;
-
 
787
 
-
 
788
	full_height = y1 == 0 && y2 == crtc->b - crtc->t;
-
 
789
 
-
 
790
	pipe = 0;
-
 
791
	DBG(("%s: pipe=%d, y1=%d, y2=%d, full_height?=%d\n",
-
 
792
	     __FUNCTION__, pipe, y1, y2, full_height));
-
 
793
 
-
 
794
	if (sna->kgem.gen >= 0100)
-
 
795
		ret = false;
-
 
796
//	else if (sna->kgem.gen >= 075)
-
 
797
//		ret = sna_emit_wait_for_scanline_hsw(sna, crtc, pipe, y1, y2, full_height);
-
 
798
//	else if (sna->kgem.gen >= 070)
-
 
799
//		ret = sna_emit_wait_for_scanline_ivb(sna, crtc, pipe, y1, y2, full_height);
-
 
800
	else if (sna->kgem.gen >= 060)
-
 
801
		ret =sna_emit_wait_for_scanline_gen6(sna, crtc, pipe, y1, y2, full_height);
-
 
802
//	else if (sna->kgem.gen >= 040)
-
 
803
//		ret = sna_emit_wait_for_scanline_gen4(sna, crtc, pipe, y1, y2, full_height);
-
 
804
 
-
 
805
	return ret;
-
 
806
}
699
 
807
 
700
 
808
 
701
bool
809
bool
702
gen6_composite(struct sna *sna,
810
gen6_composite(struct sna *sna,
703
              uint8_t op,
811
              uint8_t op,
704
		      PixmapPtr src, struct kgem_bo *src_bo,
812
		      PixmapPtr src, struct kgem_bo *src_bo,
705
		      PixmapPtr mask,struct kgem_bo *mask_bo,
813
		      PixmapPtr mask,struct kgem_bo *mask_bo,
706
		      PixmapPtr dst, struct kgem_bo *dst_bo,
814
		      PixmapPtr dst, struct kgem_bo *dst_bo,
707
              int32_t src_x, int32_t src_y,
815
              int32_t src_x, int32_t src_y,
708
              int32_t msk_x, int32_t msk_y,
816
              int32_t msk_x, int32_t msk_y,
709
              int32_t dst_x, int32_t dst_y,
817
              int32_t dst_x, int32_t dst_y,
710
              int32_t width, int32_t height,
818
              int32_t width, int32_t height,
711
              struct sna_composite_op *tmp);
819
              struct sna_composite_op *tmp);
712
 
820
 
713
 
821
 
714
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
822
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
715
 
823
 
716
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
824
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
717
                  int w, int h, int src_x, int src_y)
825
                  int w, int h, int src_x, int src_y)
718
 
826
 
719
{
827
{
720
    surface_t *sf = to_surface(bitmap);
828
    surface_t *sf = to_surface(bitmap);
721
 
829
 
722
    struct drm_i915_mask_update update;
830
    struct drm_i915_mask_update update;
723
 
831
 
724
    struct sna_composite_op composite;
832
    struct sna_composite_op composite;
725
    struct _Pixmap src, dst, mask;
833
    struct _Pixmap src, dst, mask;
726
    struct kgem_bo *src_bo, *mask_bo;
834
    struct kgem_bo *src_bo, *mask_bo;
727
    int winx, winy;
835
    int winx, winy;
728
 
836
 
729
    char proc_info[1024];
837
    char proc_info[1024];
730
 
838
 
731
    get_proc_info(proc_info);
839
    get_proc_info(proc_info);
732
 
840
 
733
    winx = *(uint32_t*)(proc_info+34);
841
    winx = *(uint32_t*)(proc_info+34);
734
    winy = *(uint32_t*)(proc_info+38);
842
    winy = *(uint32_t*)(proc_info+38);
735
//    winw = *(uint32_t*)(proc_info+42)+1;
843
//    winw = *(uint32_t*)(proc_info+42)+1;
736
//    winh = *(uint32_t*)(proc_info+46)+1;
844
//    winh = *(uint32_t*)(proc_info+46)+1;
737
 
845
 
738
    mask_bo = tls_get(tls_mask);
846
    mask_bo = tls_get(tls_mask);
739
 
847
 
740
    if(unlikely(mask_bo == NULL))
848
    if(unlikely(mask_bo == NULL))
741
    {
849
    {
742
        sna_create_mask();
850
        sna_create_mask();
743
        mask_bo = tls_get(tls_mask);
851
        mask_bo = tls_get(tls_mask);
744
        if( mask_bo == NULL)
852
        if( mask_bo == NULL)
745
            return -1;
853
            return -1;
746
    };
854
    };
747
 
855
 
748
    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
856
    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
749
    {
857
    {
750
        __lock_acquire_recursive(__sna_lock);
858
        __lock_acquire_recursive(__sna_lock);
751
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
859
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
752
        __lock_release_recursive(__sna_lock);
860
        __lock_release_recursive(__sna_lock);
753
 
861
 
754
        sna_create_mask();
862
        sna_create_mask();
755
        mask_bo = tls_get(tls_mask);
863
        mask_bo = tls_get(tls_mask);
756
        if( mask_bo == NULL)
864
        if( mask_bo == NULL)
757
            return -1;
865
            return -1;
758
    }
866
    }
759
 
867
 
760
    VG_CLEAR(update);
868
    VG_CLEAR(update);
761
	update.handle = mask_bo->handle;
869
	update.handle = mask_bo->handle;
762
	update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
870
	update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
763
	drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
871
	drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
764
    mask_bo->pitch = update.bo_pitch;
872
    mask_bo->pitch = update.bo_pitch;
765
 
873
 
766
    memset(&src, 0, sizeof(src));
874
    memset(&src, 0, sizeof(src));
767
    memset(&dst, 0, sizeof(dst));
875
    memset(&dst, 0, sizeof(dst));
768
    memset(&mask, 0, sizeof(dst));
876
    memset(&mask, 0, sizeof(dst));
769
 
877
 
770
    src.drawable.bitsPerPixel = 32;
878
    src.drawable.bitsPerPixel = 32;
771
 
879
 
772
    src.drawable.width  = sf->width;
880
    src.drawable.width  = sf->width;
773
    src.drawable.height = sf->height;
881
    src.drawable.height = sf->height;
774
 
882
 
775
    dst.drawable.bitsPerPixel = 32;
883
    dst.drawable.bitsPerPixel = 32;
776
    dst.drawable.width  = sna_fb.width;
884
    dst.drawable.width  = sna_fb.width;
777
    dst.drawable.height = sna_fb.height;
885
    dst.drawable.height = sna_fb.height;
778
 
886
 
779
    mask.drawable.bitsPerPixel = 8;
887
    mask.drawable.bitsPerPixel = 8;
780
    mask.drawable.width  = update.width;
888
    mask.drawable.width  = update.width;
781
    mask.drawable.height = update.height;
889
    mask.drawable.height = update.height;
782
 
890
 
783
    memset(&composite, 0, sizeof(composite));
891
    memset(&composite, 0, sizeof(composite));
784
 
892
 
785
    src_bo = sf->bo;
893
    src_bo = sf->bo;
786
 
894
 
787
    __lock_acquire_recursive(__sna_lock);
895
    __lock_acquire_recursive(__sna_lock);
-
 
896
 
-
 
897
    {
-
 
898
        rect_t crtc, clip;
-
 
899
 
-
 
900
        crtc.l = 0;
-
 
901
        crtc.t = 0;
-
 
902
        crtc.r = sna_fb.width-1;
-
 
903
        crtc.b = sna_fb.height-1;
-
 
904
 
-
 
905
        clip.l = winx+dst_x;
-
 
906
        clip.t = winy+dst_y;
-
 
907
        clip.r = clip.l+w-1;
-
 
908
        clip.b = clip.t+h-1;
-
 
909
 
-
 
910
        kgem_set_mode(&sna_device->kgem, KGEM_RENDER, sna_fb.fb_bo);
-
 
911
        sna_wait_for_scanline(sna_device, &crtc, &clip);
788
 
912
    }
789
 
913
 
790
    if( sna_device->render.blit_tex(sna_device, PictOpSrc,scale,
914
    if( sna_device->render.blit_tex(sna_device, PictOpSrc,scale,
791
		      &src, src_bo,
915
		      &src, src_bo,
792
		      &mask, mask_bo,
916
		      &mask, mask_bo,
793
		      &dst, sna_fb.fb_bo,
917
		      &dst, sna_fb.fb_bo,
794
              src_x, src_y,
918
              src_x, src_y,
795
              dst_x, dst_y,
919
              dst_x, dst_y,
796
              winx+dst_x, winy+dst_y,
920
              winx+dst_x, winy+dst_y,
797
              w, h,
921
              w, h,
798
              &composite) )
922
              &composite) )
799
    {
923
    {
800
	    struct sna_composite_rectangles r;
924
	    struct sna_composite_rectangles r;
801
 
925
 
802
	    r.src.x = src_x;
926
	    r.src.x = src_x;
803
	    r.src.y = src_y;
927
	    r.src.y = src_y;
804
	    r.mask.x = dst_x;
928
	    r.mask.x = dst_x;
805
	    r.mask.y = dst_y;
929
	    r.mask.y = dst_y;
806
		r.dst.x = winx+dst_x;
930
		r.dst.x = winx+dst_x;
807
	    r.dst.y = winy+dst_y;
931
	    r.dst.y = winy+dst_y;
808
	    r.width  = w;
932
	    r.width  = w;
809
	    r.height = h;
933
	    r.height = h;
810
 
934
 
811
        composite.blt(sna_device, &composite, &r);
935
        composite.blt(sna_device, &composite, &r);
812
        composite.done(sna_device, &composite);
936
        composite.done(sna_device, &composite);
813
 
937
 
814
    };
938
    };
815
 
939
 
816
    kgem_submit(&sna_device->kgem);
940
    kgem_submit(&sna_device->kgem);
817
 
941
 
818
    __lock_release_recursive(__sna_lock);
942
    __lock_release_recursive(__sna_lock);
819
 
943
 
820
    bitmap->data   = (void*)-1;
944
    bitmap->data   = (void*)-1;
821
    bitmap->pitch  = -1;
945
    bitmap->pitch  = -1;
822
 
946
 
823
    return 0;
947
    return 0;
824
}
948
}
825
 
949
 
826
 
950
 
827
 
951
 
828
 
952
 
829
 
953
 
830
 
954
 
831
 
955
 
832
static const struct intel_device_info intel_generic_info = {
956
static const struct intel_device_info intel_generic_info = {
833
	.gen = -1,
957
	.gen = -1,
834
};
958
};
835
 
959
 
836
static const struct intel_device_info intel_i915_info = {
960
static const struct intel_device_info intel_i915_info = {
837
	.gen = 030,
961
	.gen = 030,
838
};
962
};
839
static const struct intel_device_info intel_i945_info = {
963
static const struct intel_device_info intel_i945_info = {
840
	.gen = 031,
964
	.gen = 031,
841
};
965
};
842
 
966
 
843
static const struct intel_device_info intel_g33_info = {
967
static const struct intel_device_info intel_g33_info = {
844
	.gen = 033,
968
	.gen = 033,
845
};
969
};
846
 
970
 
847
static const struct intel_device_info intel_i965_info = {
971
static const struct intel_device_info intel_i965_info = {
848
	.gen = 040,
972
	.gen = 040,
849
};
973
};
850
 
974
 
851
static const struct intel_device_info intel_g4x_info = {
975
static const struct intel_device_info intel_g4x_info = {
852
	.gen = 045,
976
	.gen = 045,
853
};
977
};
854
 
978
 
855
static const struct intel_device_info intel_ironlake_info = {
979
static const struct intel_device_info intel_ironlake_info = {
856
	.gen = 050,
980
	.gen = 050,
857
};
981
};
858
 
982
 
859
static const struct intel_device_info intel_sandybridge_info = {
983
static const struct intel_device_info intel_sandybridge_info = {
860
	.gen = 060,
984
	.gen = 060,
861
};
985
};
862
 
986
 
863
static const struct intel_device_info intel_ivybridge_info = {
987
static const struct intel_device_info intel_ivybridge_info = {
864
	.gen = 070,
988
	.gen = 070,
865
};
989
};
866
 
990
 
867
static const struct intel_device_info intel_valleyview_info = {
991
static const struct intel_device_info intel_valleyview_info = {
868
	.gen = 071,
992
	.gen = 071,
869
};
993
};
870
 
994
 
871
static const struct intel_device_info intel_haswell_info = {
995
static const struct intel_device_info intel_haswell_info = {
872
	.gen = 075,
996
	.gen = 075,
873
};
997
};
874
 
998
 
875
#define INTEL_DEVICE_MATCH(d,i) \
999
#define INTEL_DEVICE_MATCH(d,i) \
876
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
1000
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
877
 
1001
 
878
 
1002
 
879
static const struct pci_id_match intel_device_match[] = {
1003
static const struct pci_id_match intel_device_match[] = {
880
 
1004
 
881
	INTEL_I915G_IDS(&intel_i915_info),
1005
	INTEL_I915G_IDS(&intel_i915_info),
882
	INTEL_I915GM_IDS(&intel_i915_info),
1006
	INTEL_I915GM_IDS(&intel_i915_info),
883
	INTEL_I945G_IDS(&intel_i945_info),
1007
	INTEL_I945G_IDS(&intel_i945_info),
884
	INTEL_I945GM_IDS(&intel_i945_info),
1008
	INTEL_I945GM_IDS(&intel_i945_info),
885
 
1009
 
886
	INTEL_G33_IDS(&intel_g33_info),
1010
	INTEL_G33_IDS(&intel_g33_info),
887
	INTEL_PINEVIEW_IDS(&intel_g33_info),
1011
	INTEL_PINEVIEW_IDS(&intel_g33_info),
888
 
1012
 
889
	INTEL_I965G_IDS(&intel_i965_info),
1013
	INTEL_I965G_IDS(&intel_i965_info),
890
	INTEL_I965GM_IDS(&intel_i965_info),
1014
	INTEL_I965GM_IDS(&intel_i965_info),
891
 
1015
 
892
	INTEL_G45_IDS(&intel_g4x_info),
1016
	INTEL_G45_IDS(&intel_g4x_info),
893
	INTEL_GM45_IDS(&intel_g4x_info),
1017
	INTEL_GM45_IDS(&intel_g4x_info),
894
 
1018
 
895
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
1019
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
896
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),
1020
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),
897
 
1021
 
898
	INTEL_SNB_D_IDS(&intel_sandybridge_info),
1022
	INTEL_SNB_D_IDS(&intel_sandybridge_info),
899
	INTEL_SNB_M_IDS(&intel_sandybridge_info),
1023
	INTEL_SNB_M_IDS(&intel_sandybridge_info),
900
 
1024
 
901
	INTEL_IVB_D_IDS(&intel_ivybridge_info),
1025
	INTEL_IVB_D_IDS(&intel_ivybridge_info),
902
	INTEL_IVB_M_IDS(&intel_ivybridge_info),
1026
	INTEL_IVB_M_IDS(&intel_ivybridge_info),
903
 
1027
 
904
	INTEL_HSW_D_IDS(&intel_haswell_info),
1028
	INTEL_HSW_D_IDS(&intel_haswell_info),
905
	INTEL_HSW_M_IDS(&intel_haswell_info),
1029
	INTEL_HSW_M_IDS(&intel_haswell_info),
906
 
1030
 
907
	INTEL_VLV_D_IDS(&intel_valleyview_info),
1031
	INTEL_VLV_D_IDS(&intel_valleyview_info),
908
	INTEL_VLV_M_IDS(&intel_valleyview_info),
1032
	INTEL_VLV_M_IDS(&intel_valleyview_info),
909
 
1033
 
910
	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),
1034
	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),
911
 
1035
 
912
	{ 0, 0, 0 },
1036
	{ 0, 0, 0 },
913
};
1037
};
914
 
1038
 
915
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
1039
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
916
{
1040
{
917
    while(list->device_id)
1041
    while(list->device_id)
918
    {
1042
    {
919
        if(dev==list->device_id)
1043
        if(dev==list->device_id)
920
            return list;
1044
            return list;
921
        list++;
1045
        list++;
922
    }
1046
    }
923
    return NULL;
1047
    return NULL;
924
}
1048
}
925
 
1049
 
926
const struct intel_device_info *
1050
const struct intel_device_info *
927
intel_detect_chipset(struct pci_device *pci)
1051
intel_detect_chipset(struct pci_device *pci)
928
{
1052
{
929
    const struct pci_id_match *ent = NULL;
1053
    const struct pci_id_match *ent = NULL;
930
 
1054
 
931
    ent = PciDevMatch(pci->device_id, intel_device_match);
1055
    ent = PciDevMatch(pci->device_id, intel_device_match);
932
 
1056
 
933
    if(ent != NULL)
1057
    if(ent != NULL)
934
        return (const struct intel_device_info*)ent->match_data;
1058
        return (const struct intel_device_info*)ent->match_data;
935
    else
1059
    else
936
        return &intel_generic_info;
1060
        return &intel_generic_info;
937
}
1061
}
938
 
1062
 
939
int intel_get_device_id(int fd)
1063
int intel_get_device_id(int fd)
940
{
1064
{
941
	struct drm_i915_getparam gp;
1065
	struct drm_i915_getparam gp;
942
	int devid = 0;
1066
	int devid = 0;
943
 
1067
 
944
	memset(&gp, 0, sizeof(gp));
1068
	memset(&gp, 0, sizeof(gp));
945
	gp.param = I915_PARAM_CHIPSET_ID;
1069
	gp.param = I915_PARAM_CHIPSET_ID;
946
	gp.value = &devid;
1070
	gp.value = &devid;
947
 
1071
 
948
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
1072
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
949
		return 0;
1073
		return 0;
950
 
1074
 
951
	return devid;
1075
	return devid;
952
}
1076
}
953
 
1077
 
954
int drmIoctl(int fd, unsigned long request, void *arg)
1078
int drmIoctl(int fd, unsigned long request, void *arg)
955
{
1079
{
956
    ioctl_t  io;
1080
    ioctl_t  io;
957
 
1081
 
958
    io.handle   = fd;
1082
    io.handle   = fd;
959
    io.io_code  = request;
1083
    io.io_code  = request;
960
    io.input    = arg;
1084
    io.input    = arg;
961
    io.inp_size = 64;
1085
    io.inp_size = 64;
962
    io.output   = NULL;
1086
    io.output   = NULL;
963
    io.out_size = 0;
1087
    io.out_size = 0;
964
 
1088
 
965
    return call_service(&io);
1089
    return call_service(&io);
966
}
1090
}
-
 
1091
 
-
 
1092
static>
-
 
1093
 
-
 
1094
static>
-
 
1095
#define>
-
 
1096
#define>