/**************************************************************************

Copyright 2001 VA Linux Systems Inc., Fremont, California.
Copyright © 2002 by David Dawes

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors: Jeff Hartmann
 *          Abraham van der Merwe
 *          David Dawes
 *          Alan Hourihane
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include 
#include 
#include "i915_pciids.h"

#include "compiler.h"
#include "sna.h"
#include "sna_reg.h"

#include 
#include "../pixdriver.h"

#include 
#define to_surface(x) (surface_t*)((x)->handle)

typedef struct {
    int l;
    int t;
    int r;
    int b;
} rect_t;

static struct sna_fb sna_fb;
static int    tls_mask;

int tls_alloc(void);
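
/* The helpers below read and write a single dword in the thread-local
 * storage block that KolibriOS addresses through the %fs segment: the
 * key is the byte offset of the slot and must be 4-byte aligned
 * (tls_set() rejects unaligned keys).  tls_alloc() is assumed to hand
 * out such an offset; tls_mask holds the slot used for the per-thread
 * mask bo. */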

static inline void *tls_get(int key)
{
    void *val;
    __asm__ __volatile__(
    "movl %%fs:(%1), %0"
    :"=r"(val)
    :"r"(key));

    return val;
};

static inline int
tls_set(int key, const void *ptr)
{
    if(!(key & 3))
    {
        __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        ::"r"(ptr),"r"(key));
        return 0;
    }
    else return -1;
}


int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
uint32_t kgem_surface_size(struct kgem *kgem, bool relaxed_fencing,
                  unsigned flags, uint32_t width, uint32_t height,
                  uint32_t bpp, uint32_t tiling, uint32_t *pitch);
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
                        int pitch, int height);

void kgem_close_batches(struct kgem *kgem);
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

__LOCK_INIT_RECURSIVE(, __sna_lock);

static void no_render_reset(struct sna *sna)
{
    (void)sna;
}

static void no_render_flush(struct sna *sna)
{
    (void)sna;
}

static void
no_render_context_switch(struct kgem *kgem,
                         int new_mode)
{
    if (!kgem->nbatch)
        return;

    if (kgem_ring_is_idle(kgem, kgem->ring)) {
        DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
        _kgem_submit(kgem);
    }

    (void)new_mode;
}

static void
no_render_retire(struct kgem *kgem)
{
    (void)kgem;
}

static void
no_render_expire(struct kgem *kgem)
{
    (void)kgem;
}

static void
no_render_fini(struct sna *sna)
{
    (void)sna;
}

const char *no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

    render->reset = no_render_reset;
    render->flush = no_render_flush;
    render->fini = no_render_fini;

    sna->kgem.context_switch = no_render_context_switch;
    sna->kgem.retire = no_render_retire;
    sna->kgem.expire = no_render_expire;

    sna->kgem.mode = KGEM_RENDER;
    sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
    return "generic";
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}
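
/* Pick a render backend by GPU generation.  The gen values used below
 * are octal constants encoding major.minor: 030 = gen3, 060 = Sandy
 * Bridge, 070 = Ivy Bridge (with 071 = Valleyview and 075 = Haswell in
 * the device table further down), 0100 = gen8 and newer, which stays on
 * the no-op "generic" backend here. */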

int sna_accel_init(struct sna *sna)
{
    const char *backend;

    backend = no_render_init(sna);
    if (sna->info->gen >= 0100)
        (void)backend;
    else if (sna->info->gen >= 070)
        backend = gen7_render_init(sna, backend);
    else if (sna->info->gen >= 060)
        backend = gen6_render_init(sna, backend);
    else if (sna->info->gen >= 050)
        backend = gen5_render_init(sna, backend);
    else if (sna->info->gen >= 040)
        backend = gen4_render_init(sna, backend);
    else if (sna->info->gen >= 030)
        backend = gen3_render_init(sna, backend);

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}


#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif


int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;
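
    /* Window origin of the calling thread: the KolibriOS process-info
     * buffer is assumed to carry the window's x/y start at byte offsets
     * 34 and 38; the blit destination below is offset by it so drawing
     * lands inside the caller's window. */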

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;

//    __asm__ __volatile__("int3");

};

typedef struct
{
    uint32_t        width;
    uint32_t        height;
    void           *data;
    uint32_t        pitch;
    struct kgem_bo *bo;
    uint32_t        bo_size;
    uint32_t        flags;
}surface_t;


#define MI_LOAD_REGISTER_IMM        (0x22<<23)
#define MI_WAIT_FOR_EVENT           (0x03<<23)
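
/* MI command-streamer opcodes (the opcode field sits in bits 28:23 of
 * the command dword).  The sna_emit_wait_for_scanline_*() helpers below
 * use them to unmask the wanted scanline event in DERRMR, program a
 * scanline window for the pipe and stall the ring with
 * MI_WAIT_FOR_EVENT until the display reaches it; on gen6+ the register
 * writes are privileged, hence the I915_EXEC_SECURE batches. */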

static bool sna_emit_wait_for_scanline_hsw(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t event;
    uint32_t *b;

    if (!sna->kgem.has_secure_batches)
        return false;

    b = kgem_get_batch(&sna->kgem);
    sna->kgem.nbatch += 17;

    switch (pipe) {
    default: assert(0);
    case 0: event = 1 << 0; break;
    case 1: event = 1 << 8; break;
    case 2: event = 1 << 14; break;
    }

    b[0] = MI_LOAD_REGISTER_IMM | 1;
    b[1] = 0x44050; /* DERRMR */
    b[2] = ~event;
    b[3] = MI_LOAD_REGISTER_IMM | 1;
    b[4] = 0xa188; /* FORCEWAKE_MT */
    b[5] = 2 << 16 | 2;

    /* The documentation says that the LOAD_SCAN_LINES command
     * always comes in pairs. Don't ask me why. */
    switch (pipe) {
    default: assert(0);
    case 0: event = 0 << 19; break;
    case 1: event = 1 << 19; break;
    case 2: event = 4 << 19; break;
    }
    b[8] = b[6] = MI_LOAD_SCAN_LINES_INCL | event;
    b[9] = b[7] = (y1 << 16) | (y2-1);

    switch (pipe) {
    default: assert(0);
    case 0: event = 1 << 0; break;
    case 1: event = 1 << 8; break;
    case 2: event = 1 << 14; break;
    }
    b[10] = MI_WAIT_FOR_EVENT | event;

    b[11] = MI_LOAD_REGISTER_IMM | 1;
    b[12] = 0xa188; /* FORCEWAKE_MT */
    b[13] = 2 << 16;
    b[14] = MI_LOAD_REGISTER_IMM | 1;
    b[15] = 0x44050; /* DERRMR */
    b[16] = ~0;

    sna->kgem.batch_flags |= I915_EXEC_SECURE;
    return true;
}


static bool sna_emit_wait_for_scanline_ivb(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t *b;
    uint32_t event;
    uint32_t forcewake;

    if (!sna->kgem.has_secure_batches)
        return false;

    assert(y1 >= 0);
    assert(y2 > y1);
    assert(sna->kgem.mode);

    /* Always program one less than the desired value */
    if (--y1 < 0)
        y1 = crtc->b;
    y2--;

    switch (pipe) {
    default:
        assert(0);
    case 0:
        event = 1 << (full_height ? 3 : 0);
        break;
    case 1:
        event = 1 << (full_height ? 11 : 8);
        break;
    case 2:
        event = 1 << (full_height ? 21 : 14);
        break;
    }

    if (sna->kgem.gen == 071)
        forcewake = 0x1300b0; /* FORCEWAKE_VLV */
    else
        forcewake = 0xa188; /* FORCEWAKE_MT */

    b = kgem_get_batch(&sna->kgem);

    /* Both the LRI and WAIT_FOR_EVENT must be in the same cacheline */
    if (((sna->kgem.nbatch + 6) >> 4) != (sna->kgem.nbatch + 10) >> 4) {
        int dw = sna->kgem.nbatch + 6;
        dw = ALIGN(dw, 16) - dw;
        while (dw--)
            *b++ = MI_NOOP;
    }

    b[0] = MI_LOAD_REGISTER_IMM | 1;
    b[1] = 0x44050; /* DERRMR */
    b[2] = ~event;
    b[3] = MI_LOAD_REGISTER_IMM | 1;
    b[4] = forcewake;
    b[5] = 2 << 16 | 2;
    b[6] = MI_LOAD_REGISTER_IMM | 1;
    b[7] = 0x70068 + 0x1000 * pipe;
    b[8] = (1 << 31) | (1 << 30) | (y1 << 16) | y2;
    b[9] = MI_WAIT_FOR_EVENT | event;
    b[10] = MI_LOAD_REGISTER_IMM | 1;
    b[11] = forcewake;
    b[12] = 2 << 16;
    b[13] = MI_LOAD_REGISTER_IMM | 1;
    b[14] = 0x44050; /* DERRMR */
    b[15] = ~0;

    sna->kgem.nbatch = b - sna->kgem.batch + 16;

    sna->kgem.batch_flags |= I915_EXEC_SECURE;
    return true;
}


static bool sna_emit_wait_for_scanline_gen6(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t *b;
    uint32_t event;

//    if (!sna->kgem.has_secure_batches)
//        return false;

    assert(y1 >= 0);
    assert(y2 > y1);
    assert(sna->kgem.mode == KGEM_RENDER);

    /* Always program one less than the desired value */
    if (--y1 < 0)
        y1 = crtc->b;
    y2--;

    /* The scanline granularity is 3 bits */
    y1 &= ~7;
    y2 &= ~7;
    if (y2 == y1)
        return false;

    event = 1 << (3*full_height + pipe*8);

    b = kgem_get_batch(&sna->kgem);
    sna->kgem.nbatch += 10;

    b[0] = MI_LOAD_REGISTER_IMM | 1;
    b[1] = 0x44050; /* DERRMR */
    b[2] = ~event;
    b[3] = MI_LOAD_REGISTER_IMM | 1;
    b[4] = 0x4f100; /* magic */
    b[5] = (1 << 31) | (1 << 30) | pipe << 29 | (y1 << 16) | y2;
    b[6] = MI_WAIT_FOR_EVENT | event;
    b[7] = MI_LOAD_REGISTER_IMM | 1;
    b[8] = 0x44050; /* DERRMR */
    b[9] = ~0;

    sna->kgem.batch_flags |= I915_EXEC_SECURE;

    return true;
}

static bool sna_emit_wait_for_scanline_gen4(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t event;
    uint32_t *b;

    if (pipe == 0) {
        if (full_height)
            event = MI_WAIT_FOR_PIPEA_SVBLANK;
        else
            event = MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
    } else {
        if (full_height)
            event = MI_WAIT_FOR_PIPEB_SVBLANK;
        else
            event = MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW;
    }

    b = kgem_get_batch(&sna->kgem);
    sna->kgem.nbatch += 5;

    /* The documentation says that the LOAD_SCAN_LINES command
     * always comes in pairs. Don't ask me why. */
    b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
    b[3] = b[1] = (y1 << 16) | (y2-1);
    b[4] = MI_WAIT_FOR_EVENT | event;

    return true;
}

static bool sna_emit_wait_for_scanline_gen2(struct sna *sna,
                        rect_t *crtc,
                        int pipe, int y1, int y2,
                        bool full_height)
{
    uint32_t *b;

    /*
     * Pre-965 doesn't have SVBLANK, so we need a bit
     * of extra time for the blitter to start up and
     * do its job for a full height blit
     */
    if (full_height)
        y2 -= 2;

    b = kgem_get_batch(&sna->kgem);
    sna->kgem.nbatch += 5;

    /* The documentation says that the LOAD_SCAN_LINES command
     * always comes in pairs. Don't ask me why. */
    b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
    b[3] = b[1] = (y1 << 16) | (y2-1);
    b[4] = MI_WAIT_FOR_EVENT | 1 << (1 + 4*pipe);

    return true;
}

bool
sna_wait_for_scanline(struct sna *sna,
                      rect_t *crtc,
                      rect_t *clip)
{
    bool full_height;
    int y1, y2, pipe;
    bool ret;

//    if (sna->flags & SNA_NO_VSYNC)
//        return false;

    /*
     * Make sure we don't wait for a scanline that will
     * never occur
     */
    y1 = clip->t - crtc->t;
    if (y1 < 1)
        y1 = 1;
    y2 = clip->b - crtc->t;
    if (y2 > crtc->b - crtc->t)
        y2 = crtc->b - crtc->t;
//    DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));
//    printf("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2);

    if (y2 <= y1 + 4)
        return false;

    full_height = y1 == 0 && y2 == crtc->b - crtc->t;

    pipe = sna_fb.pipe;
    DBG(("%s: pipe=%d, y1=%d, y2=%d, full_height?=%d\n",
         __FUNCTION__, pipe, y1, y2, full_height));

    if (sna->kgem.gen >= 0100)
        ret = false;
    else if (sna->kgem.gen >= 075)
        ret = sna_emit_wait_for_scanline_hsw(sna, crtc, pipe, y1, y2, full_height);
    else if (sna->kgem.gen >= 070)
        ret = sna_emit_wait_for_scanline_ivb(sna, crtc, pipe, y1, y2, full_height);
    else if (sna->kgem.gen >= 060)
        ret = sna_emit_wait_for_scanline_gen6(sna, crtc, pipe, y1, y2, full_height);
    else if (sna->kgem.gen >= 040)
        ret = sna_emit_wait_for_scanline_gen4(sna, crtc, pipe, y1, y2, full_height);
    else
        ret = sna_emit_wait_for_scanline_gen2(sna, crtc, pipe, y1, y2, full_height);

    return ret;
}


static const struct intel_device_info intel_generic_info = {
    .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
    .gen = 030,
};
static const struct intel_device_info intel_i945_info = {
    .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
    .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
    .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
    .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
    .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
    .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
    .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
    .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
    .gen = 075,
};

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }


static const struct pci_id_match intel_device_match[] = {

    INTEL_I915G_IDS(&intel_i915_info),
    INTEL_I915GM_IDS(&intel_i915_info),
    INTEL_I945G_IDS(&intel_i945_info),
    INTEL_I945GM_IDS(&intel_i945_info),

    INTEL_G33_IDS(&intel_g33_info),
    INTEL_PINEVIEW_IDS(&intel_g33_info),

    INTEL_I965G_IDS(&intel_i965_info),
    INTEL_I965GM_IDS(&intel_i965_info),

    INTEL_G45_IDS(&intel_g4x_info),
    INTEL_GM45_IDS(&intel_g4x_info),

    INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
    INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),

    INTEL_SNB_D_IDS(&intel_sandybridge_info),
    INTEL_SNB_M_IDS(&intel_sandybridge_info),

    INTEL_IVB_D_IDS(&intel_ivybridge_info),
    INTEL_IVB_M_IDS(&intel_ivybridge_info),

    INTEL_HSW_D_IDS(&intel_haswell_info),
    INTEL_HSW_M_IDS(&intel_haswell_info),

    INTEL_VLV_D_IDS(&intel_valleyview_info),
    INTEL_VLV_M_IDS(&intel_valleyview_info),

    INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),

    { 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev==list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent = NULL;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if(ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;
}

int intel_get_device_id(int fd)
{
    struct drm_i915_getparam gp;
    int devid = 0;

    memset(&gp, 0, sizeof(gp));
    gp.param = I915_PARAM_CHIPSET_ID;
    gp.value = &devid;

    if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
        return 0;

    return devid;
}

int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}
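
/* There is no /dev node on KolibriOS: drmIoctl() above simply packs the
 * request into an ioctl_t and forwards it to the kernel-side video
 * service through call_service(), with fd acting as the service handle.
 * The fixed inp_size of 64 bytes is assumed to be large enough for the
 * small DRM argument structures used in this file. */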


bool
gen6_composite(struct sna *sna,
              uint8_t op,
              PixmapPtr src, struct kgem_bo *src_bo,
              PixmapPtr mask, struct kgem_bo *mask_bo,
              PixmapPtr dst, struct kgem_bo *dst_bo,
              int32_t src_x, int32_t src_y,
              int32_t msk_x, int32_t msk_y,
              int32_t dst_x, int32_t dst_y,
              int32_t width, int32_t height,
              struct sna_composite_op *tmp);

//#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))


int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    if(bo == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
};

void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem_bo *bo = sf->bo;
    bo->handle = handle;
}

static int sna_create_bitmap(bitmap_t *bitmap)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if(bo == NULL)
        goto err_2;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
};

static int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);

    kgem_bo_destroy(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
};

static int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

//    printf("%s\n", __FUNCTION__);
    __lock_acquire_recursive(__sna_lock);

    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
};
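
/* sna_resize_bitmap() keeps the existing buffer whenever
 * kgem_surface_size() reports that the new geometry still fits into the
 * bo that is already allocated (only the pitch changes); otherwise the
 * old bo is destroyed and a fresh CPU-mappable one is created and
 * mapped. */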

static int sna_resize_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem *kgem = &sna_device->kgem;
    struct kgem_bo *bo = sf->bo;

    uint32_t   size;
    uint32_t   pitch;

    bitmap->pitch = -1;
    bitmap->data = (void *) -1;

    size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, CREATE_CPU_MAP,
                 bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
    assert(size && size <= kgem->max_object_size);

    if(sf->bo_size >= size)
    {
        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->pitch   = pitch;
        bo->pitch   = pitch;

        return 0;
    }
    else
    {
        __lock_acquire_recursive(__sna_lock);

        sna_bo_destroy(kgem, bo);

        sf->bo = NULL;

        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
                            32, I915_TILING_NONE, CREATE_CPU_MAP);

        if(bo == NULL)
        {
            __lock_release_recursive(__sna_lock);
            return -1;
        };

        void *map = kgem_bo_map(kgem, bo);
        if(map == NULL)
        {
            sna_bo_destroy(kgem, bo);
            __lock_release_recursive(__sna_lock);
            return -1;
        };

        __lock_release_recursive(__sna_lock);

        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->data    = map;
        sf->pitch   = bo->pitch;
        sf->bo      = bo;
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    }

    return 0;
};
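
/* Each thread keeps one screen-sized 8bpp mask bo in its TLS slot
 * (tls_mask).  The mask is refreshed by the kernel via SRV_MASK_UPDATE
 * in sna_blit_tex() and is presumably used to clip the blit to the
 * visible part of the window; here it is only allocated, mapped and
 * cleared. */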


int sna_create_mask()
{
    struct kgem_bo *bo;

//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
                        8, I915_TILING_NONE, CREATE_CPU_MAP);

    if(unlikely(bo == NULL))
        goto err_1;

    int *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    memset(map, 0, bo->pitch * sna_fb.height);

    tls_set(tls_mask, bo);

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    __lock_release_recursive(__sna_lock);
    return -1;
};


int sna_blit_tex(bitmap_t *bitmap, int scale, int vsync,
                 int dst_x, int dst_y, int w, int h, int src_x, int src_y)
{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    mask_bo = tls_get(tls_mask);

    if(unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    };

    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    VG_CLEAR(update);
    update.handle = mask_bo->handle;
    update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;

    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);

    if(vsync)
    {
        rect_t crtc, clip;

        crtc.l = 0;
        crtc.t = 0;
        crtc.r = sna_fb.width-1;
        crtc.b = sna_fb.height-1;

        clip.l = winx+dst_x;
        clip.t = winy+dst_y;
        clip.r = clip.l+w-1;
        clip.b = clip.t+h-1;

        kgem_set_mode(&sna_device->kgem, KGEM_RENDER, sna_fb.fb_bo);
        sna_wait_for_scanline(sna_device, &crtc, &clip);
    }

    if( sna_device->render.blit_tex(sna_device, PictOpSrc, scale,
              &src, src_bo,
              &mask, mask_bo,
              &dst, sna_fb.fb_bo,
              src_x, src_y,
              dst_x, dst_y,
              winx+dst_x, winy+dst_y,
              w, h,
              &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);

    };

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}


static void sna_fini()
{
    if( sna_device )
    {
        struct kgem_bo *mask;

        __lock_acquire_recursive(__sna_lock);

        mask = tls_get(tls_mask);

        sna_device->render.fini(sna_device);
        if(mask)
            kgem_bo_destroy(&sna_device->kgem, mask);
//        kgem_close_batches(&sna_device->kgem);
        kgem_cleanup_cache(&sna_device->kgem);

        sna_device = NULL;
        __lock_release_recursive(__sna_lock);
    };
}
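
/* DrvInit() is the driver entry point: it queries the PCI device from
 * the video service (SRV_GET_PCI_INFO), detects the chipset, brings up
 * kgem and the per-generation render backend, allocates the TLS slot
 * for the clip masks, fills in the pix_driver function table and
 * returns the render caps (0 if initialisation failed). */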

uint32_t DrvInit(uint32_t service, struct pix_driver *driver)
{
    ioctl_t   io;
    int caps = 0;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    __lock_acquire_recursive(__sna_lock);

    if(sna_device)
        goto done;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
        goto err1;

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        goto err1;

    memset(sna, 0, sizeof(*sna));

    sna->cpu_features = sna_cpu_detect();

    sna->PciInfo = &device;
    sna->info = intel_detect_chipset(sna->PciInfo);
    sna->scrn = service;

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

    /* Disable tiling by default */
    sna->tiling = 0;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    sna_accel_init(sna);

    tls_mask = tls_alloc();

//    printf("tls mask %x\n", tls_mask);

    driver->create_bitmap  = sna_create_bitmap;
    driver->destroy_bitmap = sna_destroy_bitmap;
    driver->lock_bitmap    = sna_lock_bitmap;
    driver->blit           = sna_blit_tex;
    driver->resize_bitmap  = sna_resize_bitmap;
    driver->fini           = sna_fini;
done:
    caps = sna_device->render.caps;

err1:
    __lock_release_recursive(__sna_lock);

    return caps;
}
1301