/**************************************************************************

Copyright 2001 VA Linux Systems Inc., Fremont, California.
Copyright © 2002 by David Dawes

All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
on the rights to use, copy, modify, merge, publish, distribute, sub
license, and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.

**************************************************************************/

/*
 * Authors: Jeff Hartmann
 *          Abraham van der Merwe
 *          David Dawes
 *          Alan Hourihane
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include 
#include 
#include "i915_pciids.h"

#include "compiler.h"
#include "sna.h"

#include 
#include 

#define to_surface(x) (surface_t*)((x)->handle)

typedef struct {
    int l;
    int t;
    int r;
    int b;
} rect_t;

static struct sna_fb sna_fb;
static int    tls_mask;

int tls_alloc(void);

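/*
 * Thread-local storage helpers. KolibriOS exposes TLS through the %fs
 * segment, so a slot allocated by tls_alloc() is read and written as
 * %fs:(key); tls_set() rejects keys that are not 4-byte aligned. The
 * driver uses one slot (tls_mask) to hold the per-thread window-mask bo,
 * roughly:
 *
 *     tls_mask = tls_alloc();
 *     tls_set(tls_mask, bo);       // stash per-thread pointer
 *     bo = tls_get(tls_mask);      // read it back
 */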
static inline void *tls_get(int key)
{
    void *val;
    __asm__ __volatile__(
    "movl %%fs:(%1), %0"
    :"=r"(val)
    :"r"(key));

    return val;
}

static inline int
tls_set(int key, const void *ptr)
{
    if(!(key & 3))
    {
        __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        ::"r"(ptr),"r"(key));
        return 0;
    }
    else return -1;
}

int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
uint32_t kgem_surface_size(struct kgem *kgem, bool relaxed_fencing,
                           unsigned flags, uint32_t width, uint32_t height,
                           uint32_t bpp, uint32_t tiling, uint32_t *pitch);
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
                                    int pitch, int height);

void kgem_close_batches(struct kgem *kgem);
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

__LOCK_INIT_RECURSIVE(, __sna_lock);

static void no_render_reset(struct sna *sna)
{
	(void)sna;
}

static void no_render_flush(struct sna *sna)
{
	(void)sna;
}

static void
no_render_context_switch(struct kgem *kgem,
			 int new_mode)
{
	if (!kgem->nbatch)
		return;

	if (kgem_ring_is_idle(kgem, kgem->ring)) {
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
		_kgem_submit(kgem);
	}

	(void)new_mode;
}

static void
no_render_retire(struct kgem *kgem)
{
	(void)kgem;
}

static void
no_render_expire(struct kgem *kgem)
{
	(void)kgem;
}

static void
no_render_fini(struct sna *sna)
{
	(void)sna;
}

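/*
 * Fallback render backend: every hook is a no-op, so all drawing falls
 * back to the BLT engine (PREFER_GPU_BLT). sna_accel_init() replaces
 * these defaults with a generation-specific backend when one exists.
 */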
const char *no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

    render->reset = no_render_reset;
    render->flush = no_render_flush;
    render->fini = no_render_fini;

    sna->kgem.context_switch = no_render_context_switch;
    sna->kgem.retire = no_render_retire;
    sna->kgem.expire = no_render_expire;

    sna->kgem.mode = KGEM_RENDER;
    sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
    return "generic";
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

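/*
 * Generation numbers are octal: the high digit is the major gen and the
 * low digit the minor, so 060 is Gen6 (Sandy Bridge), 070 is Gen7 (Ivy
 * Bridge) and 075 is Haswell. The >= comparisons below rely on this
 * encoding.
 */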
int sna_accel_init(struct sna *sna)
{
    const char *backend;

    backend = no_render_init(sna);
    if (sna->info->gen >= 0100)
        (void)backend;
    else if (sna->info->gen >= 070)
        backend = gen7_render_init(sna, backend);
    else if (sna->info->gen >= 060)
        backend = gen6_render_init(sna, backend);
    else if (sna->info->gen >= 050)
        backend = gen5_render_init(sna, backend);
    else if (sna->info->gen >= 040)
        backend = gen4_render_init(sna, backend);
    else if (sna->info->gen >= 030)
        backend = gen3_render_init(sna, backend);

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}

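/*
 * Driver entry point: query the GPU's PCI identity from the display
 * service, detect the chipset generation, bring up kgem and report the
 * render caps. Calling it again after a successful init simply returns
 * the caps of the already-initialised device.
 */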
int sna_init(uint32_t service)
{
    ioctl_t   io;
    int caps = 0;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    __lock_acquire_recursive(__sna_lock);

    if(sna_device)
        goto done;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
        goto err1;

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        goto err1;

    memset(sna, 0, sizeof(*sna));

    sna->cpu_features = sna_cpu_detect();

    sna->PciInfo = &device;
    sna->info = intel_detect_chipset(sna->PciInfo);
    sna->scrn = service;

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

    /* Disable tiling by default */
    sna->tiling = 0;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    sna_accel_init(sna);

    tls_mask = tls_alloc();

//    printf("tls mask %x\n", tls_mask);

done:
    caps = sna_device->render.caps;

err1:
    __lock_release_recursive(__sna_lock);

    return caps;
}

void sna_fini()
{
    ENTER();

    if( sna_device )
    {
        struct kgem_bo *mask;

        __lock_acquire_recursive(__sna_lock);

        mask = tls_get(tls_mask);

        sna_device->render.fini(sna_device);
        if(mask)
            kgem_bo_destroy(&sna_device->kgem, mask);
//        kgem_close_batches(&sna_device->kgem);
        kgem_cleanup_cache(&sna_device->kgem);

        sna_device = NULL;
        __lock_release_recursive(__sna_lock);
    }
    LEAVE();
}

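/*
 * The solid-colour cache below was inherited from the X.org SNA driver
 * but is unused in this port, hence the #if 0.
 */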
#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif

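/*
 * Blit a bitmap onto the framebuffer. Destination coordinates are
 * window-relative: the window origin is read from the process info block
 * (offsets 34 and 38 hold the window x/y) and added to dst_x/dst_y.
 */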
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;

//    __asm__ __volatile__("int3");
}

typedef struct
{
    uint32_t        width;
    uint32_t        height;
    void           *data;
    uint32_t        pitch;
    struct kgem_bo *bo;
    uint32_t        bo_size;
    uint32_t        flags;
} surface_t;

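/*
 * Bitmaps are backed by a CPU-mappable GEM buffer: creation allocates and
 * maps the bo, then hands the surface_t back through bitmap->handle.
 * sna_lock_bitmap() later synchronises the mapping for CPU access and
 * exposes data/pitch to the caller.
 */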
int sna_create_bitmap(bitmap_t *bitmap)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if(bo == NULL)
        goto err_2;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}

int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    if(bo == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}

void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem_bo *bo = sf->bo;
    bo->handle = handle;
}

int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);

    kgem_bo_destroy(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}

int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

//    printf("%s\n", __FUNCTION__);
    __lock_acquire_recursive(__sna_lock);

    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
}

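/*
 * Resize in place when the existing bo is already big enough for the new
 * geometry (only the pitch changes); otherwise release it and allocate a
 * fresh CPU-mapped bo.
 */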
int sna_resize_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem *kgem = &sna_device->kgem;
    struct kgem_bo *bo = sf->bo;

    uint32_t   size;
    uint32_t   pitch;

    bitmap->pitch = -1;
    bitmap->data = (void *) -1;

    size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, CREATE_CPU_MAP,
                             bitmap->width, bitmap->height, 32,
                             I915_TILING_NONE, &pitch);
    assert(size && size <= kgem->max_object_size);

    if(sf->bo_size >= size)
    {
        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->pitch   = pitch;
        bo->pitch   = pitch;

        return 0;
    }
    else
    {
        __lock_acquire_recursive(__sna_lock);

        sna_bo_destroy(kgem, bo);

        sf->bo = NULL;

        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
                            32, I915_TILING_NONE, CREATE_CPU_MAP);
        if(bo == NULL)
        {
            __lock_release_recursive(__sna_lock);
            return -1;
        }

        void *map = kgem_bo_map(kgem, bo);
        if(map == NULL)
        {
            sna_bo_destroy(kgem, bo);
            __lock_release_recursive(__sna_lock);
            return -1;
        }

        __lock_release_recursive(__sna_lock);

        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->data    = map;
        sf->pitch   = bo->pitch;
        sf->bo      = bo;
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    }

    return 0;
}

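/*
 * The mask is an 8bpp surface covering the whole framebuffer, zeroed and
 * parked in this thread's TLS slot. sna_blit_tex() has the kernel refresh
 * it through SRV_MASK_UPDATE and composites through it, which apparently
 * is what clips blits to the visible part of the window.
 */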
int sna_create_mask()
{
    struct kgem_bo *bo;

//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
                        8, I915_TILING_NONE, CREATE_CPU_MAP);
    if(unlikely(bo == NULL))
        goto err_1;

    int *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    memset(map, 0, bo->pitch * sna_fb.height);

    tls_set(tls_mask, bo);

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    __lock_release_recursive(__sna_lock);
    return -1;
}

#define MI_LOAD_REGISTER_IMM		(0x22<<23)
#define MI_WAIT_FOR_EVENT			(0x03<<23)

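/*
 * Emit a Gen6 wait-for-scanline sequence: unmask the scanline event in
 * DERRMR (0x44050), load the y1..y2 window into what appears to be the
 * pipe scanline comparison register (0x4f100), MI_WAIT_FOR_EVENT on it,
 * then mask the event again. These are privileged register writes, hence
 * I915_EXEC_SECURE on the batch.
 */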
static bool sna_emit_wait_for_scanline_gen6(struct sna *sna,
					    rect_t *crtc,
					    int pipe, int y1, int y2,
					    bool full_height)
{
	uint32_t *b;
	uint32_t event;

//	if (!sna->kgem.has_secure_batches)
//		return false;

	assert(y1 >= 0);
	assert(y2 > y1);
	assert(sna->kgem.mode == KGEM_RENDER);

	/* Always program one less than the desired value */
	if (--y1 < 0)
		y1 = crtc->b;
	y2--;

	/* The scanline granularity is 3 bits */
	y1 &= ~7;
	y2 &= ~7;
	if (y2 == y1)
		return false;

	event = 1 << (3*full_height + pipe*8);

	b = kgem_get_batch(&sna->kgem);
	sna->kgem.nbatch += 10;

	b[0] = MI_LOAD_REGISTER_IMM | 1;
	b[1] = 0x44050; /* DERRMR */
	b[2] = ~event;
	b[3] = MI_LOAD_REGISTER_IMM | 1;
	b[4] = 0x4f100; /* magic */
	b[5] = (1 << 31) | (1 << 30) | pipe << 29 | (y1 << 16) | y2;
	b[6] = MI_WAIT_FOR_EVENT | event;
	b[7] = MI_LOAD_REGISTER_IMM | 1;
	b[8] = 0x44050; /* DERRMR */
	b[9] = ~0;

	sna->kgem.batch_flags |= I915_EXEC_SECURE;

	return true;
}

bool
sna_wait_for_scanline(struct sna *sna,
		      rect_t *crtc,
		      rect_t *clip)
{
	bool full_height;
	int y1, y2, pipe;
	bool ret = false;

//	if (sna->flags & SNA_NO_VSYNC)
//		return false;

	/*
	 * Make sure we don't wait for a scanline that will
	 * never occur.
	 */
	y1 = clip->t - crtc->t;
	if (y1 < 0)
		y1 = 0;
	y2 = clip->b - crtc->t;
	if (y2 > crtc->b - crtc->t)
		y2 = crtc->b - crtc->t;
//	DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));

	if (y2 <= y1 + 4)
		return false;

	full_height = y1 == 0 && y2 == crtc->b - crtc->t;

	pipe = 0;
	DBG(("%s: pipe=%d, y1=%d, y2=%d, full_height?=%d\n",
	     __FUNCTION__, pipe, y1, y2, full_height));

	/* ret stays false for generations without an emitter */
	if (sna->kgem.gen >= 0100)
		ret = false;
//	else if (sna->kgem.gen >= 075)
//		ret = sna_emit_wait_for_scanline_hsw(sna, crtc, pipe, y1, y2, full_height);
//	else if (sna->kgem.gen >= 070)
//		ret = sna_emit_wait_for_scanline_ivb(sna, crtc, pipe, y1, y2, full_height);
	else if (sna->kgem.gen >= 060)
		ret = sna_emit_wait_for_scanline_gen6(sna, crtc, pipe, y1, y2, full_height);
//	else if (sna->kgem.gen >= 040)
//		ret = sna_emit_wait_for_scanline_gen4(sna, crtc, pipe, y1, y2, full_height);

	return ret;
}

bool
gen6_composite(struct sna *sna,
	       uint8_t op,
	       PixmapPtr src, struct kgem_bo *src_bo,
	       PixmapPtr mask, struct kgem_bo *mask_bo,
	       PixmapPtr dst, struct kgem_bo *dst_bo,
	       int32_t src_x, int32_t src_y,
	       int32_t msk_x, int32_t msk_y,
	       int32_t dst_x, int32_t dst_y,
	       int32_t width, int32_t height,
	       struct sna_composite_op *tmp);


#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))

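/*
 * Composite a bitmap onto the framebuffer through the per-thread 8bpp
 * mask: fetch (or rebuild) the mask bo, ask the kernel to refresh its
 * contents via SRV_MASK_UPDATE, wait until the scanline is clear of the
 * destination rectangle to avoid tearing, then issue the masked blit.
 */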
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                 int w, int h, int src_x, int src_y)
{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    mask_bo = tls_get(tls_mask);

    if(unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if(mask_bo == NULL)
            return -1;
    }

    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if(mask_bo == NULL)
            return -1;
    }

    VG_CLEAR(update);
    update.handle = mask_bo->handle;
    update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);

    {
        rect_t crtc, clip;

        crtc.l = 0;
        crtc.t = 0;
        crtc.r = sna_fb.width-1;
        crtc.b = sna_fb.height-1;

        clip.l = winx+dst_x;
        clip.t = winy+dst_y;
        clip.r = clip.l+w-1;
        clip.b = clip.t+h-1;

        kgem_set_mode(&sna_device->kgem, KGEM_RENDER, sna_fb.fb_bo);
        sna_wait_for_scanline(sna_device, &crtc, &clip);
    }

    if( sna_device->render.blit_tex(sna_device, PictOpSrc, scale,
                                    &src, src_bo,
                                    &mask, mask_bo,
                                    &dst, sna_fb.fb_bo,
                                    src_x, src_y,
                                    dst_x, dst_y,
                                    winx+dst_x, winy+dst_y,
                                    w, h,
                                    &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    }

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}


static const struct intel_device_info intel_generic_info = {
	.gen = -1,
};

static const struct intel_device_info intel_i915_info = {
	.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
	.gen = 031,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 033,
};

static const struct intel_device_info intel_i965_info = {
	.gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
	.gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
	.gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
	.gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
	.gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
	.gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
	.gen = 075,
};

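/*
 * PCI-id to chipset-info mapping. The INTEL_*_IDS macros from
 * i915_pciids.h expand to pci_id_match entries pointing at the
 * intel_device_info blocks above; anything unmatched falls back to
 * intel_generic_info (gen = -1), which leaves only the no-render backend.
 */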
#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

static const struct pci_id_match intel_device_match[] = {

	INTEL_I915G_IDS(&intel_i915_info),
	INTEL_I915GM_IDS(&intel_i915_info),
	INTEL_I945G_IDS(&intel_i945_info),
	INTEL_I945GM_IDS(&intel_i945_info),

	INTEL_G33_IDS(&intel_g33_info),
	INTEL_PINEVIEW_IDS(&intel_g33_info),

	INTEL_I965G_IDS(&intel_i965_info),
	INTEL_I965GM_IDS(&intel_i965_info),

	INTEL_G45_IDS(&intel_g4x_info),
	INTEL_GM45_IDS(&intel_g4x_info),

	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),

	INTEL_SNB_D_IDS(&intel_sandybridge_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_info),

	INTEL_IVB_D_IDS(&intel_ivybridge_info),
	INTEL_IVB_M_IDS(&intel_ivybridge_info),

	INTEL_HSW_D_IDS(&intel_haswell_info),
	INTEL_HSW_M_IDS(&intel_haswell_info),

	INTEL_VLV_D_IDS(&intel_valleyview_info),
	INTEL_VLV_M_IDS(&intel_valleyview_info),

	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),

	{ 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent = NULL;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if(ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;
}

int intel_get_device_id(int fd)
{
	struct drm_i915_getparam gp;
	int devid = 0;

	memset(&gp, 0, sizeof(gp));
	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &devid;

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;

	return devid;
}

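/*
 * KolibriOS has no DRM device nodes: drmIoctl() is emulated by packing
 * the request into an ioctl_t and forwarding it to the display service.
 * The fixed inp_size of 64 bytes is presumably enough for the largest
 * argument structure this driver passes.
 */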
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}