Subversion Repositories Kolibri OS

/**************************************************************************

Copyright 2001 VA Linux Systems Inc., Fremont, California.
Copyright © 2002 by David Dawes

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors: Jeff Hartmann
 *          Abraham van der Merwe
 *          David Dawes
 *          Alan Hourihane
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include 
#include 
#include 
#include 
#include "i915_pciids.h"

#include "compiler.h"
#include "sna.h"
#include "intel_driver.h"

#define to_surface(x) (surface_t*)((x)->handle)

static struct sna_fb sna_fb;
static int    tls_mask;

int tls_alloc(void);
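
/* Thread-local storage helpers: KolibriOS exposes a per-thread data
 * block through the %fs segment, and a TLS "key" is a 4-byte-aligned
 * offset into that block. tls_get/tls_set access one slot with
 * fs-relative moves; tls_set refuses keys that are not 4-byte aligned. */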
static inline void *tls_get(int key)
{
    void *val;
    __asm__ __volatile__(
    "movl %%fs:(%1), %0"
    :"=r"(val)
    :"r"(key));

    return val;
}

static inline int
tls_set(int key, const void *ptr)
{
    if(!(key & 3))
    {
        __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        ::"r"(ptr),"r"(key));
        return 0;
    }
    else return -1;
}

int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
uint32_t kgem_surface_size(struct kgem *kgem, bool relaxed_fencing,
                           unsigned flags, uint32_t width, uint32_t height,
                           uint32_t bpp, uint32_t tiling, uint32_t *pitch);
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
                                    int pitch, int height);

void kgem_close_batches(struct kgem *kgem);
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

__LOCK_INIT_RECURSIVE(, __sna_lock);

static void no_render_reset(struct sna *sna)
{
	(void)sna;
}

static void no_render_flush(struct sna *sna)
{
	(void)sna;
}

static void
no_render_context_switch(struct kgem *kgem,
			 int new_mode)
{
	if (!kgem->nbatch)
		return;

	if (kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
		_kgem_submit(kgem);
	}

	(void)new_mode;
}

static void
no_render_retire(struct kgem *kgem)
{
	(void)kgem;
}

static void
no_render_expire(struct kgem *kgem)
{
	(void)kgem;
}

static void
no_render_fini(struct sna *sna)
{
	(void)sna;
}
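
/* Software fallback: install no-op render and kgem hooks so callers can
 * always go through sna->render; the gen-specific init functions replace
 * them when the chipset is recognised. */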
const char *no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

    render->reset = no_render_reset;
    render->flush = no_render_flush;
    render->fini = no_render_fini;

    sna->kgem.context_switch = no_render_context_switch;
    sna->kgem.retire = no_render_retire;
    sna->kgem.expire = no_render_expire;

    sna->kgem.mode = KGEM_RENDER;
    sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
    return "generic";
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}
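
/* Select a render backend by hardware generation. Gen values are octal:
 * 030 = gen3, 060 = gen6 (Sandy Bridge), 070 = gen7 (Ivy Bridge),
 * 0100 = gen8+, which for now stays on the no-render path. */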
int sna_accel_init(struct sna *sna)
{
	const char *backend;

	backend = no_render_init(sna);
	if (sna->info->gen >= 0100)
		(void)backend;
	else if (sna->info->gen >= 070)
		backend = gen7_render_init(sna, backend);
	else if (sna->info->gen >= 060)
		backend = gen6_render_init(sna, backend);
	else if (sna->info->gen >= 050)
		backend = gen5_render_init(sna, backend);
	else if (sna->info->gen >= 040)
		backend = gen4_render_init(sna, backend);
	else if (sna->info->gen >= 030)
		backend = gen3_render_init(sna, backend);

	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
	     __FUNCTION__, backend, sna->render.prefer_gpu));

	kgem_reset(&sna->kgem);

	sna_device = sna;

	return kgem_init_fb(&sna->kgem, &sna_fb);
}
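
/* One-time driver initialisation: query PCI info from the display
 * service, detect the chipset, bring up kgem and the accel backend,
 * and return the render caps. A repeated call just returns the caps
 * of the already-initialised device. */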
int sna_init(uint32_t service)
{
    ioctl_t   io;
    int caps = 0;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    __lock_acquire_recursive(__sna_lock);

    if(sna_device)
        goto done;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
        goto err1;

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        goto err1;

    memset(sna, 0, sizeof(*sna));

    sna->cpu_features = sna_cpu_detect();

    sna->PciInfo = &device;
    sna->info = intel_detect_chipset(sna->PciInfo);
    sna->scrn = service;

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

    /* Disable tiling by default */
    sna->tiling = 0;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    sna_accel_init(sna);

    tls_mask = tls_alloc();

//    printf("tls mask %x\n", tls_mask);

done:
    caps = sna_device->render.caps;

err1:
    __lock_release_recursive(__sna_lock);

    return caps;
}
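
/* Tear down the device: destroy this thread's mask bo if one exists,
 * shut down the render backend and drop the kgem batch/bo caches. */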
void sna_fini(void)
{
    if( sna_device )
    {
        struct kgem_bo *mask;

        __lock_acquire_recursive(__sna_lock);

        mask = tls_get(tls_mask);

        sna_device->render.fini(sna_device);
        if(mask)
            kgem_bo_destroy(&sna_device->kgem, mask);
        kgem_close_batches(&sna_device->kgem);
        kgem_cleanup_cache(&sna_device->kgem);

        sna_device = NULL;
        __lock_release_recursive(__sna_lock);
    }
}

#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}

struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif
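
/* Blit a rectangle from a client bitmap to the framebuffer with the 2D
 * copy engine. Destination coordinates are window-relative, so the blit
 * is offset by the window position read from the process info block
 * (x at byte offset 34, y at byte offset 38). */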
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;

//    __asm__ __volatile__("int3");
}
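
/* Driver-side state behind a client bitmap_t: the GEM bo plus its CPU
 * mapping and geometry. bitmap_t::handle stores a pointer to this. */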
typedef struct
{
    uint32_t        width;
    uint32_t        height;
    void           *data;
    uint32_t        pitch;
    struct kgem_bo *bo;
    uint32_t        bo_size;
    uint32_t        flags;
} surface_t;
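
/* Create a CPU-mapped, untiled 32bpp bo for a client bitmap and publish
 * the surface_t through bitmap->handle. */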
int sna_create_bitmap(bitmap_t *bitmap)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if(bo == NULL)
        goto err_2;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}

int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    if(bo == NULL)          /* bail out while the lock is still held */
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}

void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem_bo *bo = sf->bo;
    bo->handle = handle;
}

int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);

    kgem_bo_destroy(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}

int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

//    printf("%s\n", __FUNCTION__);
    __lock_acquire_recursive(__sna_lock);

    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
}
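
/* Resize a bitmap in place. If the existing bo is large enough for the
 * new geometry only the dimensions and pitch are updated; otherwise the
 * bo is destroyed and replaced by a freshly allocated, CPU-mapped one. */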
603
 
604
int sna_resize_bitmap(bitmap_t *bitmap)
605
{
606
    surface_t *sf = to_surface(bitmap);
607
    struct kgem *kgem = &sna_device->kgem;
608
    struct kgem_bo *bo = sf->bo;
609
 
610
    uint32_t   size;
611
    uint32_t   pitch;
612
 
613
   	bitmap->pitch = -1;
614
    bitmap->data = (void *) -1;
615
 
616
	size = kgem_surface_size(kgem,kgem->has_relaxed_fencing, CREATE_CPU_MAP,
617
				 bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
618
	assert(size && size <= kgem->max_object_size);
619
 
620
    if(sf->bo_size >= size)
621
    {
622
        sf->width   = bitmap->width;
623
        sf->height  = bitmap->height;
624
        sf->pitch   = pitch;
625
        bo->pitch   = pitch;
626
 
627
	    return 0;
628
    }
629
    else
630
    {
631
        __lock_acquire_recursive(__sna_lock);
632
 
633
        sna_bo_destroy(kgem, bo);
634
 
635
        sf->bo = NULL;
636
 
637
        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
638
                            32, I915_TILING_NONE, CREATE_CPU_MAP);
639
 
640
        if(bo == NULL)
641
        {
642
            __lock_release_recursive(__sna_lock);
643
            return -1;
644
        };
645
 
646
        void *map = kgem_bo_map(kgem, bo);
647
        if(map == NULL)
648
        {
649
            sna_bo_destroy(kgem, bo);
650
            __lock_release_recursive(__sna_lock);
651
            return -1;
652
        };
653
 
654
        __lock_release_recursive(__sna_lock);
655
 
656
        sf->width   = bitmap->width;
657
        sf->height  = bitmap->height;
658
        sf->data    = map;
659
        sf->pitch   = bo->pitch;
660
        sf->bo      = bo;
661
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
662
    }
663
 
664
    return 0;
665
};
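
/* Allocate this thread's 8bpp window mask, sized to match the
 * framebuffer, clear it and store the bo in the thread's TLS slot. */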
666
 
667
 
668
 
669
int sna_create_mask()
670
{
671
	struct kgem_bo *bo;
672
 
673
//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);
674
 
675
    __lock_acquire_recursive(__sna_lock);
676
 
677
    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
678
                        8,I915_TILING_NONE, CREATE_CPU_MAP);
679
 
680
    if(unlikely(bo == NULL))
681
        goto err_1;
682
 
683
    int *map = kgem_bo_map(&sna_device->kgem, bo);
684
    if(map == NULL)
685
        goto err_2;
686
 
687
    __lock_release_recursive(__sna_lock);
688
 
689
    memset(map, 0, bo->pitch * sna_fb.height);
690
 
691
    tls_set(tls_mask, bo);
692
 
693
    return 0;
694
 
695
err_2:
696
    kgem_bo_destroy(&sna_device->kgem, bo);
697
err_1:
698
    __lock_release_recursive(__sna_lock);
699
    return -1;
700
};
701
 
702
 
703
bool
704
gen6_composite(struct sna *sna,
705
              uint8_t op,
706
		      PixmapPtr src, struct kgem_bo *src_bo,
707
		      PixmapPtr mask,struct kgem_bo *mask_bo,
708
		      PixmapPtr dst, struct kgem_bo *dst_bo,
709
              int32_t src_x, int32_t src_y,
710
              int32_t msk_x, int32_t msk_y,
711
              int32_t dst_x, int32_t dst_y,
712
              int32_t width, int32_t height,
713
              struct sna_composite_op *tmp);
714
 
715
 
716
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
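
/* Composite a client surface onto the framebuffer through the per-thread
 * 8bpp mask, clipping the blit to the visible part of the window. The
 * mask is (re)created on first use and whenever the framebuffer changes;
 * SRV_MASK_UPDATE asks the display server to refresh its contents and
 * pitch before compositing. */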
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                 int w, int h, int src_x, int src_y)
{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    mask_bo = tls_get(tls_mask);

    if(unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    VG_CLEAR(update);
    update.handle = mask_bo->handle;
    update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);

    if( sna_device->render.blit_tex(sna_device, PictOpSrc, scale,
                                    &src, src_bo,
                                    &mask, mask_bo,
                                    &dst, sna_fb.fb_bo,
                                    src_x, src_y,
                                    dst_x, dst_y,
                                    winx+dst_x, winy+dst_y,
                                    w, h,
                                    &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    }

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
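
/* Per-generation device info, again using octal gen numbers (031 = i945,
 * 045 = G4x, 071 = Valleyview, 075 = Haswell). The PCI match table below
 * maps known Intel device IDs onto these entries; anything unmatched
 * falls back to intel_generic_info (gen -1). */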
static const struct intel_device_info intel_generic_info = {
	.gen = -1,
};

static const struct intel_device_info intel_i915_info = {
	.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
	.gen = 031,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 033,
};

static const struct intel_device_info intel_i965_info = {
	.gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
	.gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
	.gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
	.gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
	.gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
	.gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
	.gen = 075,
};

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

static const struct pci_id_match intel_device_match[] = {

	INTEL_I915G_IDS(&intel_i915_info),
	INTEL_I915GM_IDS(&intel_i915_info),
	INTEL_I945G_IDS(&intel_i945_info),
	INTEL_I945GM_IDS(&intel_i945_info),

	INTEL_G33_IDS(&intel_g33_info),
	INTEL_PINEVIEW_IDS(&intel_g33_info),

	INTEL_I965G_IDS(&intel_i965_info),
	INTEL_I965GM_IDS(&intel_i965_info),

	INTEL_G45_IDS(&intel_g4x_info),
	INTEL_GM45_IDS(&intel_g4x_info),

	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),

	INTEL_SNB_D_IDS(&intel_sandybridge_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_info),

	INTEL_IVB_D_IDS(&intel_ivybridge_info),
	INTEL_IVB_M_IDS(&intel_ivybridge_info),

	INTEL_HSW_D_IDS(&intel_haswell_info),
	INTEL_HSW_M_IDS(&intel_haswell_info),

	INTEL_VLV_D_IDS(&intel_valleyview_info),
	INTEL_VLV_M_IDS(&intel_valleyview_info),

	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),

	{ 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent = NULL;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if(ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
	for (i = 0; intel_chipsets[i].name != NULL; i++) {
		if (DEVICE_ID(pci) == intel_chipsets[i].token) {
			name = intel_chipsets[i].name;
			break;
		}
	}
	if (name == NULL) {
		xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
		name = "unknown";
	} else {
		xf86DrvMsg(scrn->scrnIndex, from,
			   "Integrated Graphics Chipset: Intel(R) %s\n",
			   name);
	}

	scrn->chipset = name;
#endif
}

int intel_get_device_id(int fd)
{
	struct drm_i915_getparam gp;
	int devid = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return devid;
}
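
/* drmIoctl() shim: on KolibriOS a DRM "ioctl" is a synchronous call into
 * the display service, so the request is simply forwarded through
 * call_service(). inp_size is fixed at 64 bytes, which covers the small
 * argument structs this driver passes. */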
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}