/**************************************************************************

Copyright 2001 VA Linux Systems Inc., Fremont, California.
Copyright © 2002 by David Dawes

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors: Jeff Hartmann
 *          Abraham van der Merwe
 *          David Dawes
 *          Alan Hourihane
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <memory.h>
#include <malloc.h>
#include "i915_pciids.h"

#include "compiler.h"
#include "sna.h"

#include <pixlib2.h>
#include <kos32sys.h>

#define to_surface(x) (surface_t*)((x)->handle)

static struct sna_fb sna_fb;
static int    tls_mask;

int tls_alloc(void);

static inline void *tls_get(int key)
{
    void *val;
    __asm__ __volatile__(
    "movl %%fs:(%1), %0"
    :"=r"(val)
    :"r"(key));

    return val;
}

static inline int
tls_set(int key, const void *ptr)
{
    if(!(key & 3))
    {
        __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        ::"r"(ptr),"r"(key));
        return 0;
    }
    else return -1;
}

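/*
 * tls_get()/tls_set() above read and write one 32-bit thread-local slot
 * directly through the %fs segment register, which KolibriOS points at
 * the current thread's TLS area. tls_set() rejects keys that are not
 * 4-byte aligned, since each slot holds a single 32-bit value.
 *
 * A minimal usage sketch (assuming tls_alloc() returns an aligned slot
 * offset usable as the key):
 *
 *     int key = tls_alloc();      // reserve a TLS slot
 *     tls_set(key, ptr);          // store a per-thread pointer
 *     void *p = tls_get(key);     // read it back on the same thread
 */
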
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
uint32_t kgem_surface_size(struct kgem *kgem, bool relaxed_fencing,
                           unsigned flags, uint32_t width, uint32_t height,
                           uint32_t bpp, uint32_t tiling, uint32_t *pitch);
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
                                    int pitch, int height);

void kgem_close_batches(struct kgem *kgem);
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

__LOCK_INIT_RECURSIVE(, __sna_lock);

static void no_render_reset(struct sna *sna)
{
	(void)sna;
}

static void no_render_flush(struct sna *sna)
{
	(void)sna;
}

static void
no_render_context_switch(struct kgem *kgem,
			 int new_mode)
{
	if (!kgem->nbatch)
		return;

	if (kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
		_kgem_submit(kgem);
	}

	(void)new_mode;
}

static void
no_render_retire(struct kgem *kgem)
{
	(void)kgem;
}

static void
no_render_expire(struct kgem *kgem)
{
	(void)kgem;
}

static void
no_render_fini(struct sna *sna)
{
	(void)sna;
}

const char *no_render_init(struct sna *sna)
{
	struct sna_render *render = &sna->render;

	memset(render, 0, sizeof(*render));

	render->prefer_gpu = PREFER_GPU_BLT;

	render->vertices = render->vertex_data;
	render->vertex_size = ARRAY_SIZE(render->vertex_data);

	render->reset = no_render_reset;
	render->flush = no_render_flush;
	render->fini = no_render_fini;

	sna->kgem.context_switch = no_render_context_switch;
	sna->kgem.retire = no_render_retire;
	sna->kgem.expire = no_render_expire;

	sna->kgem.mode = KGEM_RENDER;
	sna->kgem.ring = KGEM_RENDER;

	sna_vertex_init(sna);
	return "generic";
}

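/*
 * no_render_init() installs the do-nothing software backend: rendering
 * is left to the BLT paths (PREFER_GPU_BLT) and the no_render_* stubs
 * above. sna_accel_init() below replaces it with a gen-specific backend
 * once the chipset generation is known.
 */
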
void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

int sna_accel_init(struct sna *sna)
{
	const char *backend;

	backend = no_render_init(sna);
	if (sna->info->gen >= 0100)
		(void)backend;
	else if (sna->info->gen >= 070)
		backend = gen7_render_init(sna, backend);
	else if (sna->info->gen >= 060)
		backend = gen6_render_init(sna, backend);
	else if (sna->info->gen >= 050)
		backend = gen5_render_init(sna, backend);
	else if (sna->info->gen >= 040)
		backend = gen4_render_init(sna, backend);
	else if (sna->info->gen >= 030)
		backend = gen3_render_init(sna, backend);

	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
	     __FUNCTION__, backend, sna->render.prefer_gpu));

	kgem_reset(&sna->kgem);

	sna_device = sna;

	return kgem_init_fb(&sna->kgem, &sna_fb);
}

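/*
 * The gen values compared above are octal literals encoding major.minor
 * as two digits: 030 is gen3, 045 is G4x (gen4.5), 060 is Sandy Bridge,
 * 070 is Ivy Bridge, 075 is Haswell. Gen8+ (>= 0100) has no hardware
 * backend here and stays on the "generic" no-render path.
 */
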
int sna_init(uint32_t service)
{
    ioctl_t   io;
    int caps = 0;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    __lock_acquire_recursive(__sna_lock);

    if(sna_device)
        goto done;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
        goto err1;

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        goto err1;

    memset(sna, 0, sizeof(*sna));

    sna->cpu_features = sna_cpu_detect();

    sna->PciInfo = &device;
    sna->info = intel_detect_chipset(sna->PciInfo);
    sna->scrn = service;

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

    /* Disable tiling by default */
    sna->tiling = 0;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    sna_accel_init(sna);

    tls_mask = tls_alloc();

//    printf("tls mask %x\n", tls_mask);

done:
    caps = sna_device->render.caps;

err1:
    __lock_release_recursive(__sna_lock);

    return caps;
}

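/*
 * sna_init() is the driver entry point: it fetches the GPU's PCI config
 * through the SRV_GET_PCI_INFO service call, detects the chipset, and
 * brings up kgem plus a render backend. It returns the render caps
 * bitmask, or 0 if the service call or the allocation fails.
 */
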
void sna_fini(void)
{
    if( sna_device )
    {
        struct kgem_bo *mask;

        __lock_acquire_recursive(__sna_lock);

        mask = tls_get(tls_mask);

        sna_device->render.fini(sna_device);
        if(mask)
            kgem_bo_destroy(&sna_device->kgem, mask);
        kgem_close_batches(&sna_device->kgem);
        kgem_cleanup_cache(&sna_device->kgem);

        sna_device = NULL;
        __lock_release_recursive(__sna_lock);
    }
}

#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif

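/*
 * The solid-colour cache above is compiled out (#if 0). When enabled it
 * packs recently used fill colours into one linear bo and hands out
 * 4-byte proxy bos into it, so repeated solid fills share a cached
 * source instead of allocating a fresh bo per colour.
 */
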
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;

//    __asm__ __volatile__("int3");
}

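/*
 * sna_blit_copy() blits a rectangle from a client bitmap straight onto
 * the framebuffer. The destination is window-relative, so the window
 * origin is read from the thread's process info block (offsets 34 and
 * 38 hold the window x/y position) and added to dst_x/dst_y.
 */
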
typedef struct
{
    uint32_t        width;
    uint32_t        height;
    void           *data;
    uint32_t        pitch;
    struct kgem_bo *bo;
    uint32_t        bo_size;
    uint32_t        flags;
} surface_t;

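/*
 * surface_t is the driver-side state behind a client bitmap_t: the GEM
 * buffer, its CPU mapping, pitch and allocated size. A pointer to it is
 * stored in bitmap->handle, which is what the to_surface() macro above
 * unpacks.
 */
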
int sna_create_bitmap(bitmap_t *bitmap)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if(bo == NULL)
        goto err_2;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}

int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    if(bo == NULL)      /* the handle does not resolve to a bo */
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}

void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem_bo *bo = sf->bo;
    bo->handle = handle;
}

int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);

    kgem_bo_destroy(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}

int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

//    printf("%s\n", __FUNCTION__);
    __lock_acquire_recursive(__sna_lock);

    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
}

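/*
 * Locking a bitmap synchronises its bo for CPU access and publishes the
 * CPU mapping and pitch to the caller; until then bitmap->data and
 * bitmap->pitch stay at the -1 sentinels set by destroy/resize/blit.
 */
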
int sna_resize_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem *kgem = &sna_device->kgem;
    struct kgem_bo *bo = sf->bo;

    uint32_t size;
    uint32_t pitch;

    bitmap->pitch = -1;
    bitmap->data = (void *) -1;

    size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, CREATE_CPU_MAP,
                             bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
    assert(size && size <= kgem->max_object_size);

    if(sf->bo_size >= size)
    {
        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->pitch   = pitch;
        bo->pitch   = pitch;

        return 0;
    }
    else
    {
        __lock_acquire_recursive(__sna_lock);

        sna_bo_destroy(kgem, bo);

        sf->bo = NULL;

        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
                            32, I915_TILING_NONE, CREATE_CPU_MAP);
        if(bo == NULL)
        {
            __lock_release_recursive(__sna_lock);
            return -1;
        }

        void *map = kgem_bo_map(kgem, bo);
        if(map == NULL)
        {
            sna_bo_destroy(kgem, bo);
            __lock_release_recursive(__sna_lock);
            return -1;
        }

        __lock_release_recursive(__sna_lock);

        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->data    = map;
        sf->pitch   = bo->pitch;
        sf->bo      = bo;
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    }

    return 0;
}

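/*
 * Resizing keeps the existing bo whenever it is already large enough
 * for the new geometry and only adjusts the pitch; otherwise the old bo
 * is destroyed and a fresh CPU-mapped one is allocated.
 */
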
int sna_create_mask(void)
{
    struct kgem_bo *bo;

//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
                        8, I915_TILING_NONE, CREATE_CPU_MAP);
    if(unlikely(bo == NULL))
        goto err_1;

    int *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    memset(map, 0, bo->pitch * sna_fb.height);

    tls_set(tls_mask, bo);

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    __lock_release_recursive(__sna_lock);
    return -1;
}

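/*
 * Each thread gets its own 8bpp mask surface the size of the
 * framebuffer, stored in the TLS slot reserved in sna_init(). It is
 * consumed by sna_blit_tex() below as the composite mask.
 */
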
bool
gen6_composite(struct sna *sna,
               uint8_t op,
               PixmapPtr src, struct kgem_bo *src_bo,
               PixmapPtr mask, struct kgem_bo *mask_bo,
               PixmapPtr dst, struct kgem_bo *dst_bo,
               int32_t src_x, int32_t src_y,
               int32_t msk_x, int32_t msk_y,
               int32_t dst_x, int32_t dst_y,
               int32_t width, int32_t height,
               struct sna_composite_op *tmp);


#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))

int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                 int w, int h, int src_x, int src_y)
{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    mask_bo = tls_get(tls_mask);

    if(unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    VG_CLEAR(update);
    update.handle = mask_bo->handle;
    update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);

    if( sna_device->render.blit_tex(sna_device, PictOpSrc, scale,
                                    &src, src_bo,
                                    &mask, mask_bo,
                                    &dst, sna_fb.fb_bo,
                                    src_x, src_y,
                                    dst_x, dst_y,
                                    winx+dst_x, winy+dst_y,
                                    w, h,
                                    &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    }

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}

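/*
 * sna_blit_tex() composites a client surface through the per-thread
 * window mask: SRV_MASK_UPDATE has the kernel refresh the 8bpp mask for
 * the current window stack, and the render backend then draws the
 * source through that mask onto the framebuffer at the window-relative
 * position.
 */
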
static const struct intel_device_info intel_generic_info = {
	.gen = -1,
};

static const struct intel_device_info intel_i915_info = {
	.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
	.gen = 031,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 033,
};

static const struct intel_device_info intel_i965_info = {
	.gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
	.gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
	.gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
	.gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
	.gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
	.gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
	.gen = 075,
};

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }


static const struct pci_id_match intel_device_match[] = {

	INTEL_I915G_IDS(&intel_i915_info),
	INTEL_I915GM_IDS(&intel_i915_info),
	INTEL_I945G_IDS(&intel_i945_info),
	INTEL_I945GM_IDS(&intel_i945_info),

	INTEL_G33_IDS(&intel_g33_info),
	INTEL_PINEVIEW_IDS(&intel_g33_info),

	INTEL_I965G_IDS(&intel_i965_info),
	INTEL_I965GM_IDS(&intel_i965_info),

	INTEL_G45_IDS(&intel_g4x_info),
	INTEL_GM45_IDS(&intel_g4x_info),

	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),

	INTEL_SNB_D_IDS(&intel_sandybridge_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_info),

	INTEL_IVB_D_IDS(&intel_ivybridge_info),
	INTEL_IVB_M_IDS(&intel_ivybridge_info),

	INTEL_HSW_D_IDS(&intel_haswell_info),
	INTEL_HSW_M_IDS(&intel_haswell_info),

	INTEL_VLV_D_IDS(&intel_valleyview_info),
	INTEL_VLV_M_IDS(&intel_valleyview_info),

	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),

	{ 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent = NULL;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if(ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;
}

int intel_get_device_id(int fd)
{
	struct drm_i915_getparam gp;
	int devid = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return devid;
}

int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}
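
/*
 * drmIoctl() is the KolibriOS stand-in for libdrm's drmIoctl(): DRM
 * requests are tunnelled to the display service via the generic
 * ioctl_t/call_service mechanism instead of a device node. Note that a
 * fixed inp_size of 64 bytes is reported for every request.
 */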