Subversion Repositories Kolibri OS

Rev

Rev 4374 | Rev 4377 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
4304 Serge 1
/**************************************************************************
2
 
3
Copyright 2001 VA Linux Systems Inc., Fremont, California.
4
Copyright © 2002 by David Dawes
5
 
6
All Rights Reserved.
7
 
8
Permission is hereby granted, free of charge, to any person obtaining a
9
copy of this software and associated documentation files (the "Software"),
10
to deal in the Software without restriction, including without limitation
11
on the rights to use, copy, modify, merge, publish, distribute, sub
12
license, and/or sell copies of the Software, and to permit persons to whom
13
the Software is furnished to do so, subject to the following conditions:
14
 
15
The above copyright notice and this permission notice (including the next
16
paragraph) shall be included in all copies or substantial portions of the
17
Software.
18
 
19
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
23
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25
USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 
27
**************************************************************************/
28
 
29
/*
30
 * Authors: Jeff Hartmann 
31
 *          Abraham van der Merwe 
32
 *          David Dawes 
33
 *          Alan Hourihane 
34
 */
35
 
36
#ifdef HAVE_CONFIG_H
37
#include "config.h"
38
#endif
39
 
40
#include 
41
#include 
42
#include "i915_pciids.h"
43
 
44
#include "compiler.h"
45
#include "sna.h"
4375 Serge 46
#include "sna_reg.h"
4304 Serge 47
 
4315 Serge 48
#include 
49
#include 
50
 
4304 Serge 51
#define to_surface(x) (surface_t*)((x)->handle)
52
 
4368 Serge 53
typedef struct {
54
    int l;
55
    int t;
56
    int r;
57
    int b;
58
} rect_t;
59
 
4372 Serge 60
struct pix_driver
61
{
62
    char *name;
63
 
64
    int (*create_bitmap)(bitmap_t * bitmap);
65
    int (*destroy_bitmap)(bitmap_t * bitmap);
66
    int (*lock_bitmap)(bitmap_t * bitmap);
67
    int (*blit)(bitmap_t * bitmap, bool scale, int dst_x, int dst_y,
68
                int w, int h, int src_x, int src_y);
69
    int (*resize_bitmap)(bitmap_t * bitmap);
70
    void (*fini)(void);
71
};
72
 
73
 
4304 Serge 74
static struct sna_fb sna_fb;
75
static int    tls_mask;
76
 
77
int tls_alloc(void);
78
 
79
/*
 * Read a per-thread value stored at byte offset 'key' inside the
 * FS-segment TLS block (KolibriOS 32-bit userspace convention).
 */
static inline void *tls_get(int key)
{
    void *val;

    __asm__ __volatile__(
        "movl %%fs:(%1), %0"
        : "=r"(val)
        : "r"(key));

    return val;
}
89
 
90
static inline int
91
tls_set(int key, const void *ptr)
92
{
93
    if(!(key & 3))
94
    {
95
        __asm__ __volatile__(
96
        "movl %0, %%fs:(%1)"
97
        ::"r"(ptr),"r"(key));
98
        return 0;
99
    }
100
    else return -1;
101
}
102
 
103
 
104
 
105
 
106
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
107
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
108
uint32_t kgem_surface_size(struct kgem *kgem,bool relaxed_fencing,
109
				  unsigned flags, uint32_t width, uint32_t height,
110
				  uint32_t bpp, uint32_t tiling, uint32_t *pitch);
111
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
112
                        int pitch, int height);
113
 
114
void kgem_close_batches(struct kgem *kgem);
115
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
116
 
117
 
118
static bool sna_solid_cache_init(struct sna *sna);
119
 
120
struct sna *sna_device;
121
 
122
__LOCK_INIT_RECURSIVE(, __sna_lock);
123
 
124
/* No-op render-state reset hook for the software fallback backend. */
static void no_render_reset(struct sna *sna)
{
    (void)sna;
}
128
 
129
/* No-op flush hook for the software fallback backend. */
static void no_render_flush(struct sna *sna)
{
    (void)sna;
}
133
 
134
static void
135
no_render_context_switch(struct kgem *kgem,
136
			 int new_mode)
137
{
138
	if (!kgem->nbatch)
139
		return;
140
 
141
	if (kgem_ring_is_idle(kgem, kgem->ring)) {
142
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
143
		_kgem_submit(kgem);
144
	}
145
 
146
	(void)new_mode;
147
}
148
 
149
/* No-op retire hook for the software fallback backend. */
static void
no_render_retire(struct kgem *kgem)
{
    (void)kgem;
}
154
 
155
/* No-op expire hook for the software fallback backend. */
static void
no_render_expire(struct kgem *kgem)
{
    (void)kgem;
}
160
 
161
/* No-op teardown hook for the software fallback backend. */
static void
no_render_fini(struct sna *sna)
{
    (void)sna;
}
166
 
167
const char *no_render_init(struct sna *sna)
168
{
169
    struct sna_render *render = &sna->render;
170
 
171
    memset (render,0, sizeof (*render));
172
 
173
    render->prefer_gpu = PREFER_GPU_BLT;
174
 
175
    render->vertices = render->vertex_data;
176
    render->vertex_size = ARRAY_SIZE(render->vertex_data);
177
 
178
    render->reset = no_render_reset;
179
	render->flush = no_render_flush;
180
	render->fini = no_render_fini;
181
 
182
	sna->kgem.context_switch = no_render_context_switch;
183
	sna->kgem.retire = no_render_retire;
184
	sna->kgem.expire = no_render_expire;
185
 
186
	sna->kgem.mode = KGEM_RENDER;
187
	sna->kgem.ring = KGEM_RENDER;
188
 
189
	sna_vertex_init(sna);
190
	return "generic";
191
 }
192
 
193
void sna_vertex_init(struct sna *sna)
194
{
195
//    pthread_mutex_init(&sna->render.lock, NULL);
196
//    pthread_cond_init(&sna->render.wait, NULL);
197
    sna->render.active = 0;
198
}
199
 
200
int sna_accel_init(struct sna *sna)
201
{
202
    const char *backend;
203
 
204
	backend = no_render_init(sna);
205
	if (sna->info->gen >= 0100)
206
		(void)backend;
207
	else if (sna->info->gen >= 070)
208
		backend = gen7_render_init(sna, backend);
209
	else if (sna->info->gen >= 060)
210
		backend = gen6_render_init(sna, backend);
211
	else if (sna->info->gen >= 050)
212
		backend = gen5_render_init(sna, backend);
213
	else if (sna->info->gen >= 040)
214
		backend = gen4_render_init(sna, backend);
215
	else if (sna->info->gen >= 030)
216
		backend = gen3_render_init(sna, backend);
217
 
218
	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
219
	     __FUNCTION__, backend, sna->render.prefer_gpu));
220
 
221
	kgem_reset(&sna->kgem);
222
 
223
    sna_device = sna;
224
 
225
    return kgem_init_fb(&sna->kgem, &sna_fb);
226
}
227
 
228
 
229
#if 0
/*
 * Disabled solid-colour bo cache, kept for reference.  Small solid
 * fills share one linear bo; each colour gets a 4-byte proxy bo.
 */

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

/* Write all dirty cached colours out to the backing linear bo. */
void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
                  cache->color, cache->size * sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

/* Drop all proxies and recreate the backing bo once the GPU owns it. */
static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}

/* Look up (or insert) the proxy bo holding a single solid colour. */
struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                                     i * sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif
359
 
360
 
361
/*
 * Blit a w x h rectangle from src_bitmap into the framebuffer at
 * window-relative (dst_x, dst_y), offset by the current window origin.
 * Always returns 0.
 *
 * Fix: the three '&copy' expressions had been mangled into the HTML
 * entity glyph '©' (broken by a web export) and did not compile;
 * restored to take the address of the local copy op.
 */
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    /* window origin from the KolibriOS process info block */
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;
}
407
 
408
/*
 * Backing store for a user-visible bitmap: the GEM buffer object plus
 * its CPU mapping.  A bitmap_t's handle field points at one of these
 * (see the to_surface() macro).
 */
typedef struct
{
    uint32_t        width;    /* pixels */
    uint32_t        height;   /* pixels */
    void           *data;     /* CPU map of bo; NULL when not mapped */
    uint32_t        pitch;    /* bytes per scanline */
    struct kgem_bo *bo;
    uint32_t        bo_size;  /* allocated bo size in bytes */
    uint32_t        flags;
} surface_t;
418
 
419
 
420
 
4372 Serge 421
 
422
#define MI_LOAD_REGISTER_IMM		(0x22<<23)
423
#define MI_WAIT_FOR_EVENT			(0x03<<23)
424
 
4375 Serge 425
static bool sna_emit_wait_for_scanline_hsw(struct sna *sna,
426
                        rect_t *crtc,
427
                        int pipe, int y1, int y2,
428
                        bool full_height)
429
{
430
    uint32_t event;
431
    uint32_t *b;
432
 
433
    if (!sna->kgem.has_secure_batches)
434
        return false;
435
 
436
    b = kgem_get_batch(&sna->kgem);
437
    sna->kgem.nbatch += 17;
438
 
439
    switch (pipe) {
440
    default: assert(0);
441
    case 0: event = 1 << 0; break;
442
    case 1: event = 1 << 8; break;
443
    case 2: event = 1 << 14; break;
444
    }
445
 
446
    b[0] = MI_LOAD_REGISTER_IMM | 1;
447
    b[1] = 0x44050; /* DERRMR */
448
    b[2] = ~event;
449
    b[3] = MI_LOAD_REGISTER_IMM | 1;
450
    b[4] = 0xa188; /* FORCEWAKE_MT */
451
    b[5] = 2 << 16 | 2;
452
 
453
    /* The documentation says that the LOAD_SCAN_LINES command
454
     * always comes in pairs. Don't ask me why. */
455
    switch (pipe) {
456
    default: assert(0);
457
    case 0: event = 0 << 19; break;
458
    case 1: event = 1 << 19; break;
459
    case 2: event = 4 << 19; break;
460
    }
461
    b[8] = b[6] = MI_LOAD_SCAN_LINES_INCL | event;
462
    b[9] = b[7] = (y1 << 16) | (y2-1);
463
 
464
    switch (pipe) {
465
    default: assert(0);
466
    case 0: event = 1 << 0; break;
467
    case 1: event = 1 << 8; break;
468
    case 2: event = 1 << 14; break;
469
    }
470
    b[10] = MI_WAIT_FOR_EVENT | event;
471
 
472
    b[11] = MI_LOAD_REGISTER_IMM | 1;
473
    b[12] = 0xa188; /* FORCEWAKE_MT */
474
    b[13] = 2 << 16;
475
    b[14] = MI_LOAD_REGISTER_IMM | 1;
476
    b[15] = 0x44050; /* DERRMR */
477
    b[16] = ~0;
478
 
479
    sna->kgem.batch_flags |= I915_EXEC_SECURE;
480
    return true;
481
}
482
 
483
 
484
static bool sna_emit_wait_for_scanline_ivb(struct sna *sna,
485
                        rect_t *crtc,
486
                        int pipe, int y1, int y2,
487
                        bool full_height)
488
{
489
    uint32_t *b;
490
    uint32_t event;
491
    uint32_t forcewake;
492
 
493
    if (!sna->kgem.has_secure_batches)
494
        return false;
495
 
496
    assert(y1 >= 0);
497
    assert(y2 > y1);
498
    assert(sna->kgem.mode);
499
 
500
    /* Always program one less than the desired value */
501
    if (--y1 < 0)
502
        y1 = crtc->b;
503
    y2--;
504
 
505
    switch (pipe) {
506
    default:
507
        assert(0);
508
    case 0:
509
        event = 1 << (full_height ? 3 : 0);
510
        break;
511
    case 1:
512
        event = 1 << (full_height ? 11 : 8);
513
        break;
514
    case 2:
515
        event = 1 << (full_height ? 21 : 14);
516
        break;
517
    }
518
 
519
    if (sna->kgem.gen == 071)
520
        forcewake = 0x1300b0; /* FORCEWAKE_VLV */
521
    else
522
        forcewake = 0xa188; /* FORCEWAKE_MT */
523
 
524
    b = kgem_get_batch(&sna->kgem);
525
 
526
    /* Both the LRI and WAIT_FOR_EVENT must be in the same cacheline */
527
    if (((sna->kgem.nbatch + 6) >> 4) != (sna->kgem.nbatch + 10) >> 4) {
528
        int dw = sna->kgem.nbatch + 6;
529
        dw = ALIGN(dw, 16) - dw;
530
        while (dw--)
531
            *b++ = MI_NOOP;
532
    }
533
 
534
    b[0] = MI_LOAD_REGISTER_IMM | 1;
535
    b[1] = 0x44050; /* DERRMR */
536
    b[2] = ~event;
537
    b[3] = MI_LOAD_REGISTER_IMM | 1;
538
    b[4] = forcewake;
539
    b[5] = 2 << 16 | 2;
540
    b[6] = MI_LOAD_REGISTER_IMM | 1;
541
    b[7] = 0x70068 + 0x1000 * pipe;
542
    b[8] = (1 << 31) | (1 << 30) | (y1 << 16) | y2;
543
    b[9] = MI_WAIT_FOR_EVENT | event;
544
    b[10] = MI_LOAD_REGISTER_IMM | 1;
545
    b[11] = forcewake;
546
    b[12] = 2 << 16;
547
    b[13] = MI_LOAD_REGISTER_IMM | 1;
548
    b[14] = 0x44050; /* DERRMR */
549
    b[15] = ~0;
550
 
551
    sna->kgem.nbatch = b - sna->kgem.batch + 16;
552
 
553
    sna->kgem.batch_flags |= I915_EXEC_SECURE;
554
    return true;
555
}
556
 
557
 
4372 Serge 558
static bool sna_emit_wait_for_scanline_gen6(struct sna *sna,
559
                        rect_t *crtc,
560
					    int pipe, int y1, int y2,
561
					    bool full_height)
4304 Serge 562
{
4372 Serge 563
	uint32_t *b;
564
	uint32_t event;
565
 
566
//	if (!sna->kgem.has_secure_batches)
567
//		return false;
568
 
569
	assert(y1 >= 0);
570
	assert(y2 > y1);
571
	assert(sna->kgem.mode == KGEM_RENDER);
572
 
573
	/* Always program one less than the desired value */
574
	if (--y1 < 0)
575
		y1 = crtc->b;
576
	y2--;
577
 
578
	/* The scanline granularity is 3 bits */
579
	y1 &= ~7;
580
	y2 &= ~7;
581
	if (y2 == y1)
582
		return false;
583
 
584
	event = 1 << (3*full_height + pipe*8);
585
 
586
	b = kgem_get_batch(&sna->kgem);
587
	sna->kgem.nbatch += 10;
588
 
589
	b[0] = MI_LOAD_REGISTER_IMM | 1;
590
	b[1] = 0x44050; /* DERRMR */
591
	b[2] = ~event;
592
	b[3] = MI_LOAD_REGISTER_IMM | 1;
593
	b[4] = 0x4f100; /* magic */
594
	b[5] = (1 << 31) | (1 << 30) | pipe << 29 | (y1 << 16) | y2;
595
	b[6] = MI_WAIT_FOR_EVENT | event;
596
	b[7] = MI_LOAD_REGISTER_IMM | 1;
597
	b[8] = 0x44050; /* DERRMR */
598
	b[9] = ~0;
599
 
600
	sna->kgem.batch_flags |= I915_EXEC_SECURE;
601
 
602
	return true;
603
}
604
 
4375 Serge 605
static bool sna_emit_wait_for_scanline_gen4(struct sna *sna,
606
                        rect_t *crtc,
607
                        int pipe, int y1, int y2,
608
                        bool full_height)
609
{
610
    uint32_t event;
611
    uint32_t *b;
612
 
613
    if (pipe == 0) {
614
        if (full_height)
615
            event = MI_WAIT_FOR_PIPEA_SVBLANK;
616
        else
617
            event = MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW;
618
    } else {
619
        if (full_height)
620
            event = MI_WAIT_FOR_PIPEB_SVBLANK;
621
        else
622
            event = MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW;
623
    }
624
 
625
    b = kgem_get_batch(&sna->kgem);
626
    sna->kgem.nbatch += 5;
627
 
628
    /* The documentation says that the LOAD_SCAN_LINES command
629
     * always comes in pairs. Don't ask me why. */
630
    b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
631
    b[3] = b[1] = (y1 << 16) | (y2-1);
632
    b[4] = MI_WAIT_FOR_EVENT | event;
633
 
634
    return true;
635
}
636
 
637
static bool sna_emit_wait_for_scanline_gen2(struct sna *sna,
638
                        rect_t *crtc,
639
                        int pipe, int y1, int y2,
640
                        bool full_height)
641
{
642
    uint32_t *b;
643
 
644
    /*
645
     * Pre-965 doesn't have SVBLANK, so we need a bit
646
     * of extra time for the blitter to start up and
647
     * do its job for a full height blit
648
     */
649
    if (full_height)
650
        y2 -= 2;
651
 
652
    b = kgem_get_batch(&sna->kgem);
653
    sna->kgem.nbatch += 5;
654
 
655
    /* The documentation says that the LOAD_SCAN_LINES command
656
     * always comes in pairs. Don't ask me why. */
657
    b[2] = b[0] = MI_LOAD_SCAN_LINES_INCL | pipe << 20;
658
    b[3] = b[1] = (y1 << 16) | (y2-1);
659
    b[4] = MI_WAIT_FOR_EVENT | 1 << (1 + 4*pipe);
660
 
661
    return true;
662
}
663
 
4372 Serge 664
bool
665
sna_wait_for_scanline(struct sna *sna,
666
		      rect_t *crtc,
667
		      rect_t *clip)
668
{
669
	bool full_height;
670
	int y1, y2, pipe;
671
	bool ret;
672
 
673
//	if (sna->flags & SNA_NO_VSYNC)
674
//		return false;
675
 
676
	/*
677
	 * Make sure we don't wait for a scanline that will
678
	 * never occur
679
	 */
680
	y1 = clip->t - crtc->t;
4374 Serge 681
    if (y1 < 1)
682
        y1 = 1;
4372 Serge 683
	y2 = clip->b - crtc->t;
684
	if (y2 > crtc->b - crtc->t)
685
		y2 = crtc->b - crtc->t;
686
//	DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));
687
//	printf("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2);
688
 
689
	if (y2 <= y1 + 4)
690
		return false;
691
 
692
	full_height = y1 == 0 && y2 == crtc->b - crtc->t;
693
 
694
	pipe = 0;
695
	DBG(("%s: pipe=%d, y1=%d, y2=%d, full_height?=%d\n",
696
	     __FUNCTION__, pipe, y1, y2, full_height));
697
 
698
	if (sna->kgem.gen >= 0100)
699
		ret = false;
4375 Serge 700
    else if (sna->kgem.gen >= 075)
701
        ret = sna_emit_wait_for_scanline_hsw(sna, crtc, pipe, y1, y2, full_height);
702
    else if (sna->kgem.gen >= 070)
703
        ret = sna_emit_wait_for_scanline_ivb(sna, crtc, pipe, y1, y2, full_height);
4372 Serge 704
	else if (sna->kgem.gen >= 060)
705
		ret =sna_emit_wait_for_scanline_gen6(sna, crtc, pipe, y1, y2, full_height);
4375 Serge 706
    else if (sna->kgem.gen >= 040)
707
        ret = sna_emit_wait_for_scanline_gen4(sna, crtc, pipe, y1, y2, full_height);
708
    else
709
        ret = sna_emit_wait_for_scanline_gen2(sna, crtc, pipe, y1, y2, full_height);
4372 Serge 710
 
711
	return ret;
712
}
713
 
714
 
715
 
716
 
717
 
718
 
719
 
720
 
721
 
722
static const struct intel_device_info intel_generic_info = {
723
	.gen = -1,
724
};
725
 
726
static const struct intel_device_info intel_i915_info = {
727
	.gen = 030,
728
};
729
static const struct intel_device_info intel_i945_info = {
730
	.gen = 031,
731
};
732
 
733
static const struct intel_device_info intel_g33_info = {
734
	.gen = 033,
735
};
736
 
737
static const struct intel_device_info intel_i965_info = {
738
	.gen = 040,
739
};
740
 
741
static const struct intel_device_info intel_g4x_info = {
742
	.gen = 045,
743
};
744
 
745
static const struct intel_device_info intel_ironlake_info = {
746
	.gen = 050,
747
};
748
 
749
static const struct intel_device_info intel_sandybridge_info = {
750
	.gen = 060,
751
};
752
 
753
static const struct intel_device_info intel_ivybridge_info = {
754
	.gen = 070,
755
};
756
 
757
static const struct intel_device_info intel_valleyview_info = {
758
	.gen = 071,
759
};
760
 
761
static const struct intel_device_info intel_haswell_info = {
762
	.gen = 075,
763
};
764
 
765
#define INTEL_DEVICE_MATCH(d,i) \
766
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }
767
 
768
 
769
static const struct pci_id_match intel_device_match[] = {
770
 
771
	INTEL_I915G_IDS(&intel_i915_info),
772
	INTEL_I915GM_IDS(&intel_i915_info),
773
	INTEL_I945G_IDS(&intel_i945_info),
774
	INTEL_I945GM_IDS(&intel_i945_info),
775
 
776
	INTEL_G33_IDS(&intel_g33_info),
777
	INTEL_PINEVIEW_IDS(&intel_g33_info),
778
 
779
	INTEL_I965G_IDS(&intel_i965_info),
780
	INTEL_I965GM_IDS(&intel_i965_info),
781
 
782
	INTEL_G45_IDS(&intel_g4x_info),
783
	INTEL_GM45_IDS(&intel_g4x_info),
784
 
785
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
786
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),
787
 
788
	INTEL_SNB_D_IDS(&intel_sandybridge_info),
789
	INTEL_SNB_M_IDS(&intel_sandybridge_info),
790
 
791
	INTEL_IVB_D_IDS(&intel_ivybridge_info),
792
	INTEL_IVB_M_IDS(&intel_ivybridge_info),
793
 
794
	INTEL_HSW_D_IDS(&intel_haswell_info),
795
	INTEL_HSW_M_IDS(&intel_haswell_info),
796
 
797
	INTEL_VLV_D_IDS(&intel_valleyview_info),
798
	INTEL_VLV_M_IDS(&intel_valleyview_info),
799
 
800
	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),
801
 
802
	{ 0, 0, 0 },
803
};
804
 
805
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
806
{
807
    while(list->device_id)
808
    {
809
        if(dev==list->device_id)
810
            return list;
811
        list++;
812
    }
813
    return NULL;
814
}
815
 
816
const struct intel_device_info *
817
intel_detect_chipset(struct pci_device *pci)
818
{
819
    const struct pci_id_match *ent = NULL;
820
 
821
    ent = PciDevMatch(pci->device_id, intel_device_match);
822
 
823
    if(ent != NULL)
824
        return (const struct intel_device_info*)ent->match_data;
825
    else
826
        return &intel_generic_info;
827
}
828
 
829
int intel_get_device_id(int fd)
830
{
831
	struct drm_i915_getparam gp;
832
	int devid = 0;
833
 
834
	memset(&gp, 0, sizeof(gp));
835
	gp.param = I915_PARAM_CHIPSET_ID;
836
	gp.value = &devid;
837
 
838
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
839
		return 0;
840
 
841
	return devid;
842
}
843
 
844
int drmIoctl(int fd, unsigned long request, void *arg)
845
{
846
    ioctl_t  io;
847
 
848
    io.handle   = fd;
849
    io.io_code  = request;
850
    io.input    = arg;
851
    io.inp_size = 64;
852
    io.output   = NULL;
853
    io.out_size = 0;
854
 
855
    return call_service(&io);
856
}
857
 
858
 
859
 
860
bool
861
gen6_composite(struct sna *sna,
862
              uint8_t op,
863
              PixmapPtr src, struct kgem_bo *src_bo,
864
              PixmapPtr mask,struct kgem_bo *mask_bo,
865
              PixmapPtr dst, struct kgem_bo *dst_bo,
866
              int32_t src_x, int32_t src_y,
867
              int32_t msk_x, int32_t msk_y,
868
              int32_t dst_x, int32_t dst_y,
869
              int32_t width, int32_t height,
870
              struct sna_composite_op *tmp);
871
 
872
//#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
873
 
874
 
875
/*
 * Wrap an existing GEM handle in a surface_t and attach it to the
 * bitmap (no CPU mapping is created).  Returns 0 on success, -1 on
 * allocation or bo-import failure.
 *
 * Fix: kgem_bo_from_handle() result was dereferenced (bo->pitch)
 * without a NULL check, and the err_2 cleanup label was unreachable;
 * the check now routes import failure through that cleanup path.
 */
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    if(bo == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;          /* imported bo: not CPU mapped */
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}
908
 
4372 Serge 909
/* Overwrite the GEM handle of the bo backing this bitmap's surface. */
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);

    sf->bo->handle = handle;
}
915
 
916
/*
 * Allocate a CPU-mappable 32bpp bo for the bitmap and wrap it in a
 * surface_t stored in bitmap->handle.  Returns 0 on success, -1 on
 * failure (all intermediate resources released).
 */
static int sna_create_bitmap(bitmap_t *bitmap)
{
    surface_t *sf;
    struct kgem_bo *bo;
    void *map;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32,I915_TILING_NONE, CREATE_CPU_MAP);
    if(bo == NULL)
        goto err_2;

    map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}
958
 
4372 Serge 959
/*
 * Release the surface and bo behind a bitmap and poison the bitmap's
 * fields so stale use is caught.  Always returns 0.
 */
static int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);
    kgem_bo_destroy(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
977
 
4372 Serge 978
/*
 * Synchronise the bo for CPU access and expose the CPU mapping through
 * bitmap->data / bitmap->pitch.  Always returns 0.
 */
static int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);
    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
}
994
 
4372 Serge 995
/*
 * Resize a bitmap's backing store.  If the existing bo is large enough
 * for the new geometry it is reused with an updated pitch; otherwise it
 * is destroyed and a fresh CPU-mapped bo is allocated.  bitmap->data /
 * pitch are invalidated either way (caller must re-lock).  Returns 0 on
 * success, -1 on allocation/mapping failure (old bo already gone).
 */
static int sna_resize_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem *kgem = &sna_device->kgem;
    struct kgem_bo *bo = sf->bo;

    uint32_t   size;
    uint32_t   pitch;

    bitmap->pitch = -1;
    bitmap->data = (void *) -1;

    size = kgem_surface_size(kgem,kgem->has_relaxed_fencing, CREATE_CPU_MAP,
                 bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
    assert(size && size <= kgem->max_object_size);

    if(sf->bo_size >= size)
    {
        /* current allocation is big enough: just retarget the pitch */
        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->pitch   = pitch;
        bo->pitch   = pitch;

        return 0;
    }

    __lock_acquire_recursive(__sna_lock);

    sna_bo_destroy(kgem, bo);
    sf->bo = NULL;

    bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if(bo == NULL)
    {
        __lock_release_recursive(__sna_lock);
        return -1;
    }

    void *map = kgem_bo_map(kgem, bo);
    if(map == NULL)
    {
        sna_bo_destroy(kgem, bo);
        __lock_release_recursive(__sna_lock);
        return -1;
    }

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;

    return 0;
}
1057
 
1058
 
1059
 
1060
int sna_create_mask()
1061
{
4372 Serge 1062
    struct kgem_bo *bo;
4304 Serge 1063
 
1064
//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);
1065
 
1066
    __lock_acquire_recursive(__sna_lock);
1067
 
1068
    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
1069
                        8,I915_TILING_NONE, CREATE_CPU_MAP);
1070
 
1071
    if(unlikely(bo == NULL))
1072
        goto err_1;
1073
 
1074
    int *map = kgem_bo_map(&sna_device->kgem, bo);
1075
    if(map == NULL)
1076
        goto err_2;
1077
 
1078
    __lock_release_recursive(__sna_lock);
1079
 
1080
    memset(map, 0, bo->pitch * sna_fb.height);
1081
 
1082
    tls_set(tls_mask, bo);
1083
 
1084
    return 0;
1085
 
1086
err_2:
1087
    kgem_bo_destroy(&sna_device->kgem, bo);
1088
err_1:
1089
    __lock_release_recursive(__sna_lock);
1090
    return -1;
1091
};
1092
 
1093
 
4368 Serge 1094
 
4304 Serge 1095
/*
 * Composite a bitmap surface onto the scanout framebuffer through the
 * per-thread 8bpp window mask (so overlapping windows clip correctly).
 *
 * bitmap         source surface (must have been created/locked by this driver)
 * scale          passed through to the render backend's blit_tex hook
 * dst_x, dst_y   destination inside the window
 * w, h           blit extents
 * src_x, src_y   origin inside the source surface
 *
 * Returns 0 on success, -1 if the mask bo could not be (re)created.
 *
 * NOTE(review): winx/winy are read from fixed byte offsets 34/38 of the
 * kernel's process-info block — assumed to be the window origin; confirm
 * against the KolibriOS get_proc_info layout.
 */
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)

{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    /* lazily create the thread-local mask on first use */
    mask_bo = tls_get(tls_mask);

    if(unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    };

    /* the framebuffer changed size/mode: the old mask no longer matches,
     * drop it and build a fresh one */
    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    /* ask the kernel to refresh the mask contents for our window */
    VG_CLEAR(update);
    update.handle = mask_bo->handle;
    update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    /* BUGFIX: was sizeof(dst) — harmless only because src/dst/mask share
     * one type, but wrong if the types ever diverge */
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;

    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);

#if 0
    {
        rect_t crtc, clip;

        crtc.l = 0;
        crtc.t = 0;
        crtc.r = sna_fb.width-1;
        crtc.b = sna_fb.height-1;

        clip.l = winx+dst_x;
        clip.t = winy+dst_y;
        clip.r = clip.l+w-1;
        clip.b = clip.t+h-1;

        kgem_set_mode(&sna_device->kgem, KGEM_RENDER, sna_fb.fb_bo);
        sna_wait_for_scanline(sna_device, &crtc, &clip);
    }
#endif

    /* backend prepares the composite op; on success emit one rectangle */
    if( sna_device->render.blit_tex(sna_device, PictOpSrc,scale,
              &src, src_bo,
              &mask, mask_bo,
              &dst, sna_fb.fb_bo,
              src_x, src_y,
              dst_x, dst_y,
              winx+dst_x, winy+dst_y,
              w, h,
              &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);

    };

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    /* poison the CPU view: the bitmap must be re-locked before direct access */
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
1222
 
1223
 
4372 Serge 1224
static void sna_fini()
1225
{
1226
    ENTER();
4304 Serge 1227
 
4372 Serge 1228
    if( sna_device )
1229
    {
1230
        struct kgem_bo *mask;
4304 Serge 1231
 
4372 Serge 1232
        __lock_acquire_recursive(__sna_lock);
4304 Serge 1233
 
4372 Serge 1234
        mask = tls_get(tls_mask);
4304 Serge 1235
 
4372 Serge 1236
        sna_device->render.fini(sna_device);
1237
        if(mask)
1238
            kgem_bo_destroy(&sna_device->kgem, mask);
1239
//        kgem_close_batches(&sna_device->kgem);
1240
        kgem_cleanup_cache(&sna_device->kgem);
4304 Serge 1241
 
4372 Serge 1242
        sna_device = NULL;
1243
        __lock_release_recursive(__sna_lock);
1244
    };
1245
    LEAVE();
1246
}
4304 Serge 1247
 
4372 Serge 1248
uint32_t DrvInit(uint32_t service, struct pix_driver *driver)
1249
{
1250
    ioctl_t   io;
1251
    int caps = 0;
4304 Serge 1252
 
4372 Serge 1253
    static struct pci_device device;
1254
    struct sna *sna;
4304 Serge 1255
 
4372 Serge 1256
    DBG(("%s\n", __FUNCTION__));
4304 Serge 1257
 
4372 Serge 1258
    __lock_acquire_recursive(__sna_lock);
4304 Serge 1259
 
4372 Serge 1260
    if(sna_device)
1261
        goto done;
4304 Serge 1262
 
4372 Serge 1263
    io.handle   = service;
1264
    io.io_code  = SRV_GET_PCI_INFO;
1265
    io.input    = &device;
1266
    io.inp_size = sizeof(device);
1267
    io.output   = NULL;
1268
    io.out_size = 0;
4304 Serge 1269
 
4372 Serge 1270
    if (call_service(&io)!=0)
1271
        goto err1;
4304 Serge 1272
 
4372 Serge 1273
    sna = malloc(sizeof(*sna));
1274
    if (sna == NULL)
1275
        goto err1;
4304 Serge 1276
 
4372 Serge 1277
    memset(sna, 0, sizeof(*sna));
4304 Serge 1278
 
4372 Serge 1279
    sna->cpu_features = sna_cpu_detect();
4304 Serge 1280
 
4372 Serge 1281
    sna->PciInfo = &device;
1282
    sna->info = intel_detect_chipset(sna->PciInfo);
1283
    sna->scrn = service;
4304 Serge 1284
 
4372 Serge 1285
    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
4304 Serge 1286
 
4372 Serge 1287
    /* Disable tiling by default */
1288
    sna->tiling = 0;
4304 Serge 1289
 
4372 Serge 1290
    /* Default fail-safe value of 75 Hz */
1291
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;
4304 Serge 1292
 
4372 Serge 1293
    sna->flags = 0;
4304 Serge 1294
 
4372 Serge 1295
    sna_accel_init(sna);
4304 Serge 1296
 
4372 Serge 1297
    tls_mask = tls_alloc();
4304 Serge 1298
 
4372 Serge 1299
//    printf("tls mask %x\n", tls_mask);
4304 Serge 1300
 
4372 Serge 1301
    driver->create_bitmap  = sna_create_bitmap;
1302
    driver->destroy_bitmap = sna_destroy_bitmap;
1303
    driver->lock_bitmap    = sna_lock_bitmap;
1304
    driver->blit           = sna_blit_tex;
1305
    driver->resize_bitmap  = sna_resize_bitmap;
1306
    driver->fini           = sna_fini;
1307
done:
1308
    caps = sna_device->render.caps;
4304 Serge 1309
 
4372 Serge 1310
err1:
1311
    __lock_release_recursive(__sna_lock);
4304 Serge 1312
 
4372 Serge 1313
    return caps;
4304 Serge 1314
}
1315
 
1316
 
1317