Subversion Repositories Kolibri OS

Rev

Rev 4372 | Rev 4375 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
4304 Serge 1
/**************************************************************************
2
 
3
Copyright 2001 VA Linux Systems Inc., Fremont, California.
4
Copyright © 2002 by David Dawes
5
 
6
All Rights Reserved.
7
 
8
Permission is hereby granted, free of charge, to any person obtaining a
9
copy of this software and associated documentation files (the "Software"),
10
to deal in the Software without restriction, including without limitation
11
on the rights to use, copy, modify, merge, publish, distribute, sub
12
license, and/or sell copies of the Software, and to permit persons to whom
13
the Software is furnished to do so, subject to the following conditions:
14
 
15
The above copyright notice and this permission notice (including the next
16
paragraph) shall be included in all copies or substantial portions of the
17
Software.
18
 
19
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
23
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25
USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 
27
**************************************************************************/
28
 
29
/*
30
 * Authors: Jeff Hartmann 
31
 *          Abraham van der Merwe 
32
 *          David Dawes 
33
 *          Alan Hourihane 
34
 */
35
 
36
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

/* NOTE(review): the angle-bracket include filenames were stripped by the
 * web viewer this file was recovered from. The names below are restored
 * from the upstream KolibriOS sna.c — verify against the original tree. */
#include <memory.h>
#include <malloc.h>
#include "i915_pciids.h"

#include "compiler.h"
#include "sna.h"

#include <pixlib2.h>
#include <kos32sys.h>
49
 
4304 Serge 50
/* Recover the driver-private surface_t stored in a bitmap's opaque handle. */
#define to_surface(x) (surface_t*)((x)->handle)

/* Inclusive rectangle: left/top/right/bottom edges. */
typedef struct {
    int l;
    int t;
    int r;
    int b;
} rect_t;

/*
 * Vtable exported to the pixlib host. DrvInit() fills every function
 * pointer except 'name' (see note there).
 */
struct pix_driver
{
    char *name;

    int (*create_bitmap)(bitmap_t * bitmap);
    int (*destroy_bitmap)(bitmap_t * bitmap);
    int (*lock_bitmap)(bitmap_t * bitmap);
    /* Blit with optional scaling; dst coordinates are window-relative. */
    int (*blit)(bitmap_t * bitmap, bool scale, int dst_x, int dst_y,
                int w, int h, int src_x, int src_y);
    int (*resize_bitmap)(bitmap_t * bitmap);
    void (*fini)(void);
};
71
 
72
 
4304 Serge 73
/* Framebuffer description shared with the kernel (filled by kgem_init_fb). */
static struct sna_fb sna_fb;
/* TLS key (byte offset into %fs) holding the per-thread mask bo. */
static int    tls_mask;

int tls_alloc(void);
77
 
78
/*
 * Read the per-thread slot at byte offset 'key' in the %fs-based TLS area.
 * Returns the pointer value stored in that slot.
 */
static inline void *tls_get(int key)
{
    void *slot_value;

    __asm__ __volatile__(
        "movl %%fs:(%1), %0"
        : "=r"(slot_value)
        : "r"(key));

    return slot_value;
}
88
 
89
static inline int
90
tls_set(int key, const void *ptr)
91
{
92
    if(!(key & 3))
93
    {
94
        __asm__ __volatile__(
95
        "movl %0, %%fs:(%1)"
96
        ::"r"(ptr),"r"(key));
97
        return 0;
98
    }
99
    else return -1;
100
}
101
 
102
 
103
 
104
 
105
/* kgem helpers implemented in kgem.c (framebuffer binding and bo helpers). */
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
uint32_t kgem_surface_size(struct kgem *kgem,bool relaxed_fencing,
				  unsigned flags, uint32_t width, uint32_t height,
				  uint32_t bpp, uint32_t tiling, uint32_t *pitch);
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
                        int pitch, int height);

void kgem_close_batches(struct kgem *kgem);
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);

/* Declared but only defined inside the #if 0 block below. */
static bool sna_solid_cache_init(struct sna *sna);

/* The single global device instance; set by sna_accel_init(). */
struct sna *sna_device;

/* Recursive lock guarding sna_device and all kgem traffic in this file. */
__LOCK_INIT_RECURSIVE(, __sna_lock);
122
 
123
static void no_render_reset(struct sna *sna)
124
{
125
	(void)sna;
126
}
127
 
128
static void no_render_flush(struct sna *sna)
129
{
130
	(void)sna;
131
}
132
 
133
static void
134
no_render_context_switch(struct kgem *kgem,
135
			 int new_mode)
136
{
137
	if (!kgem->nbatch)
138
		return;
139
 
140
	if (kgem_ring_is_idle(kgem, kgem->ring)) {
141
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
142
		_kgem_submit(kgem);
143
	}
144
 
145
	(void)new_mode;
146
}
147
 
148
static void
149
no_render_retire(struct kgem *kgem)
150
{
151
	(void)kgem;
152
}
153
 
154
static void
155
no_render_expire(struct kgem *kgem)
156
{
157
	(void)kgem;
158
}
159
 
160
static void
161
no_render_fini(struct sna *sna)
162
{
163
	(void)sna;
164
}
165
 
166
const char *no_render_init(struct sna *sna)
167
{
168
    struct sna_render *render = &sna->render;
169
 
170
    memset (render,0, sizeof (*render));
171
 
172
    render->prefer_gpu = PREFER_GPU_BLT;
173
 
174
    render->vertices = render->vertex_data;
175
    render->vertex_size = ARRAY_SIZE(render->vertex_data);
176
 
177
    render->reset = no_render_reset;
178
	render->flush = no_render_flush;
179
	render->fini = no_render_fini;
180
 
181
	sna->kgem.context_switch = no_render_context_switch;
182
	sna->kgem.retire = no_render_retire;
183
	sna->kgem.expire = no_render_expire;
184
 
185
	sna->kgem.mode = KGEM_RENDER;
186
	sna->kgem.ring = KGEM_RENDER;
187
 
188
	sna_vertex_init(sna);
189
	return "generic";
190
 }
191
 
192
void sna_vertex_init(struct sna *sna)
193
{
194
//    pthread_mutex_init(&sna->render.lock, NULL);
195
//    pthread_cond_init(&sna->render.wait, NULL);
196
    sna->render.active = 0;
197
}
198
 
199
/*
 * Select and initialise the render backend for the detected GPU
 * generation, then bind the kernel framebuffer.
 *
 * Gen numbers are octal-encoded major.minor (e.g. 075 = gen 7.5).
 * Returns kgem_init_fb()'s result; also publishes the global sna_device.
 */
int sna_accel_init(struct sna *sna)
{
    const char *backend;

	backend = no_render_init(sna);
	if (sna->info->gen >= 0100)
		(void)backend;      /* gen8+ unsupported: keep the no-op backend */
	else if (sna->info->gen >= 070)
		backend = gen7_render_init(sna, backend);
	else if (sna->info->gen >= 060)
		backend = gen6_render_init(sna, backend);
	else if (sna->info->gen >= 050)
		backend = gen5_render_init(sna, backend);
	else if (sna->info->gen >= 040)
		backend = gen4_render_init(sna, backend);
	else if (sna->info->gen >= 030)
		backend = gen3_render_init(sna, backend);
	/* gen < 030 also falls back to the generic backend. */

	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
	     __FUNCTION__, backend, sna->render.prefer_gpu));

	kgem_reset(&sna->kgem);

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}
226
 
227
 
228
#if 0
229
 
230
static bool sna_solid_cache_init(struct sna *sna)
231
{
232
    struct sna_solid_cache *cache = &sna->render.solid_cache;
233
 
234
    DBG(("%s\n", __FUNCTION__));
235
 
236
    cache->cache_bo =
237
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
238
    if (!cache->cache_bo)
239
        return FALSE;
240
 
241
    /*
242
     * Initialise [0] with white since it is very common and filling the
243
     * zeroth slot simplifies some of the checks.
244
     */
245
    cache->color[0] = 0xffffffff;
246
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
247
    cache->bo[0]->pitch = 4;
248
    cache->dirty = 1;
249
    cache->size = 1;
250
    cache->last = 0;
251
 
252
    return TRUE;
253
}
254
 
255
void
256
sna_render_flush_solid(struct sna *sna)
257
{
258
    struct sna_solid_cache *cache = &sna->render.solid_cache;
259
 
260
    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
261
    assert(cache->dirty);
262
    assert(cache->size);
263
 
264
    kgem_bo_write(&sna->kgem, cache->cache_bo,
265
              cache->color, cache->size*sizeof(uint32_t));
266
    cache->dirty = 0;
267
    cache->last = 0;
268
}
269
 
270
static void
271
sna_render_finish_solid(struct sna *sna, bool force)
272
{
273
    struct sna_solid_cache *cache = &sna->render.solid_cache;
274
    int i;
275
 
276
    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
277
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));
278
 
279
    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
280
        return;
281
 
282
    if (cache->dirty)
283
        sna_render_flush_solid(sna);
284
 
285
    for (i = 0; i < cache->size; i++) {
286
        if (cache->bo[i] == NULL)
287
            continue;
288
 
289
        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
290
        cache->bo[i] = NULL;
291
    }
292
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);
293
 
294
    DBG(("sna_render_finish_solid reset\n"));
295
 
296
    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
297
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
298
    cache->bo[0]->pitch = 4;
299
    if (force)
300
        cache->size = 1;
301
}
302
 
303
 
304
struct kgem_bo *
305
sna_render_get_solid(struct sna *sna, uint32_t color)
306
{
307
    struct sna_solid_cache *cache = &sna->render.solid_cache;
308
    int i;
309
 
310
    DBG(("%s: %08x\n", __FUNCTION__, color));
311
 
312
//    if ((color & 0xffffff) == 0) /* alpha only */
313
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);
314
 
315
    if (color == 0xffffffff) {
316
        DBG(("%s(white)\n", __FUNCTION__));
317
        return kgem_bo_reference(cache->bo[0]);
318
    }
319
 
320
    if (cache->color[cache->last] == color) {
321
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
322
             cache->last, color));
323
        return kgem_bo_reference(cache->bo[cache->last]);
324
    }
325
 
326
    for (i = 1; i < cache->size; i++) {
327
        if (cache->color[i] == color) {
328
            if (cache->bo[i] == NULL) {
329
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
330
                     i, color));
331
                goto create;
332
            } else {
333
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
334
                     i, color));
335
                goto done;
336
            }
337
        }
338
    }
339
 
340
    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));
341
 
342
    i = cache->size++;
343
    cache->color[i] = color;
344
    cache->dirty = 1;
345
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
346
 
347
create:
348
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
349
                     i*sizeof(uint32_t), sizeof(uint32_t));
350
    cache->bo[i]->pitch = 4;
351
 
352
done:
353
    cache->last = i;
354
    return kgem_bo_reference(cache->bo[i]);
355
}
356
 
357
#endif
358
 
359
 
360
/*
 * GPU copy of a w x h region from src_bitmap into the screen framebuffer.
 * dst_x/dst_y are window-relative; the window origin is read from the
 * process info block (offsets 34/38 — see get_proc_info).
 *
 * Fix: every "&copy" in this routine had been mangled into the HTML
 * entity glyph "©" by the web viewer the file was recovered from;
 * restored to take the address of the local copy op.
 *
 * Always returns 0; a failed render.copy() setup silently skips the blit
 * but still submits any pending batch.
 */
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;
}
406
 
407
/* Driver-private surface stored in bitmap_t::handle (see to_surface). */
typedef struct
{
    uint32_t        width;      /* pixels */
    uint32_t        height;     /* pixels */
    void           *data;       /* CPU mapping of bo; NULL for foreign handles */
    uint32_t        pitch;      /* bytes per row */
    struct kgem_bo *bo;         /* backing buffer object */
    uint32_t        bo_size;    /* bo size in bytes (PAGE_SIZE * page count) */
    uint32_t        flags;      /* copied from bitmap_t::flags at creation */
}surface_t;
417
 
418
 
419
 
4372 Serge 420
 
421
/* MI command opcodes (bits 28:23 of the command dword). */
#define MI_LOAD_REGISTER_IMM		(0x22<<23)
#define MI_WAIT_FOR_EVENT			(0x03<<23)

/*
 * Emit a 10-dword gen6+ batch fragment that stalls the render ring until
 * the display scanline leaves the [y1, y2) window of the given pipe.
 * Unmasks the scanline-window event in DERRMR (0x44050), programs the
 * window register, waits, then re-masks all events.
 * Returns false if the 8-line-quantised window collapses to nothing.
 * Requires a secure batch (sets I915_EXEC_SECURE).
 */
static bool sna_emit_wait_for_scanline_gen6(struct sna *sna,
                        rect_t *crtc,
					    int pipe, int y1, int y2,
					    bool full_height)
{
	uint32_t *b;
	uint32_t event;

//	if (!sna->kgem.has_secure_batches)
//		return false;

	assert(y1 >= 0);
	assert(y2 > y1);
	assert(sna->kgem.mode == KGEM_RENDER);

	/* Always program one less than the desired value */
	if (--y1 < 0)
		y1 = crtc->b;   /* wrap to the bottom scanline of the crtc */
	y2--;

	/* The scanline granularity is 3 bits */
	y1 &= ~7;
	y2 &= ~7;
	if (y2 == y1)
		return false;

	/* Event bit: per-pipe block of 8, +3 selects the full-height event. */
	event = 1 << (3*full_height + pipe*8);

	b = kgem_get_batch(&sna->kgem);
	sna->kgem.nbatch += 10;

	b[0] = MI_LOAD_REGISTER_IMM | 1;
	b[1] = 0x44050; /* DERRMR */
	b[2] = ~event;                 /* unmask only our scanline event */
	b[3] = MI_LOAD_REGISTER_IMM | 1;
	b[4] = 0x4f100; /* magic */
	b[5] = (1 << 31) | (1 << 30) | pipe << 29 | (y1 << 16) | y2;
	b[6] = MI_WAIT_FOR_EVENT | event;
	b[7] = MI_LOAD_REGISTER_IMM | 1;
	b[8] = 0x44050; /* DERRMR */
	b[9] = ~0;                     /* restore: mask everything again */

	/* DERRMR writes from a batch require a secure (privileged) batch. */
	sna->kgem.batch_flags |= I915_EXEC_SECURE;

	return true;
}
470
 
471
bool
472
sna_wait_for_scanline(struct sna *sna,
473
		      rect_t *crtc,
474
		      rect_t *clip)
475
{
476
	bool full_height;
477
	int y1, y2, pipe;
478
	bool ret;
479
 
480
//	if (sna->flags & SNA_NO_VSYNC)
481
//		return false;
482
 
483
	/*
484
	 * Make sure we don't wait for a scanline that will
485
	 * never occur
486
	 */
487
	y1 = clip->t - crtc->t;
4374 Serge 488
    if (y1 < 1)
489
        y1 = 1;
4372 Serge 490
	y2 = clip->b - crtc->t;
491
	if (y2 > crtc->b - crtc->t)
492
		y2 = crtc->b - crtc->t;
493
//	DBG(("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2));
494
//	printf("%s: clipped range = %d, %d\n", __FUNCTION__, y1, y2);
495
 
496
	if (y2 <= y1 + 4)
497
		return false;
498
 
499
	full_height = y1 == 0 && y2 == crtc->b - crtc->t;
500
 
501
	pipe = 0;
502
	DBG(("%s: pipe=%d, y1=%d, y2=%d, full_height?=%d\n",
503
	     __FUNCTION__, pipe, y1, y2, full_height));
504
 
505
	if (sna->kgem.gen >= 0100)
506
		ret = false;
507
//	else if (sna->kgem.gen >= 075)
508
//		ret = sna_emit_wait_for_scanline_hsw(sna, crtc, pipe, y1, y2, full_height);
509
//	else if (sna->kgem.gen >= 070)
510
//		ret = sna_emit_wait_for_scanline_ivb(sna, crtc, pipe, y1, y2, full_height);
511
	else if (sna->kgem.gen >= 060)
512
		ret =sna_emit_wait_for_scanline_gen6(sna, crtc, pipe, y1, y2, full_height);
513
//	else if (sna->kgem.gen >= 040)
514
//		ret = sna_emit_wait_for_scanline_gen4(sna, crtc, pipe, y1, y2, full_height);
515
 
516
	return ret;
517
}
518
 
519
 
520
 
521
 
522
 
523
 
524
 
525
 
526
 
527
/* Per-chipset info. 'gen' is octal-encoded major.minor, e.g. 045 = gen 4.5
 * (G4x), 075 = gen 7.5 (Haswell); -1 means unknown/generic. */
static const struct intel_device_info intel_generic_info = {
	.gen = -1,
};

static const struct intel_device_info intel_i915_info = {
	.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
	.gen = 031,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 033,
};

static const struct intel_device_info intel_i965_info = {
	.gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
	.gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
	.gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
	.gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
	.gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
	.gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
	.gen = 075,
};
569
 
570
/* Build a pci_id_match entry for Intel vendor 0x8086, matching VGA-class
 * devices (class 0x3 in bits 16-23) and stashing the info pointer. */
#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }


/* PCI id -> intel_device_info table; terminated by an all-zero entry.
 * The INTEL_*_IDS macros come from i915_pciids.h. */
static const struct pci_id_match intel_device_match[] = {

	INTEL_I915G_IDS(&intel_i915_info),
	INTEL_I915GM_IDS(&intel_i915_info),
	INTEL_I945G_IDS(&intel_i945_info),
	INTEL_I945GM_IDS(&intel_i945_info),

	INTEL_G33_IDS(&intel_g33_info),
	INTEL_PINEVIEW_IDS(&intel_g33_info),

	INTEL_I965G_IDS(&intel_i965_info),
	INTEL_I965GM_IDS(&intel_i965_info),

	INTEL_G45_IDS(&intel_g4x_info),
	INTEL_GM45_IDS(&intel_g4x_info),

	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),

	INTEL_SNB_D_IDS(&intel_sandybridge_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_info),

	INTEL_IVB_D_IDS(&intel_ivybridge_info),
	INTEL_IVB_M_IDS(&intel_ivybridge_info),

	INTEL_HSW_D_IDS(&intel_haswell_info),
	INTEL_HSW_M_IDS(&intel_haswell_info),

	INTEL_VLV_D_IDS(&intel_valleyview_info),
	INTEL_VLV_M_IDS(&intel_valleyview_info),

	/* Catch-all for any other Intel VGA device. */
	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),

	{ 0, 0, 0 },
};
609
 
610
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
611
{
612
    while(list->device_id)
613
    {
614
        if(dev==list->device_id)
615
            return list;
616
        list++;
617
    }
618
    return NULL;
619
}
620
 
621
const struct intel_device_info *
622
intel_detect_chipset(struct pci_device *pci)
623
{
624
    const struct pci_id_match *ent = NULL;
625
 
626
    ent = PciDevMatch(pci->device_id, intel_device_match);
627
 
628
    if(ent != NULL)
629
        return (const struct intel_device_info*)ent->match_data;
630
    else
631
        return &intel_generic_info;
632
}
633
 
634
int intel_get_device_id(int fd)
635
{
636
	struct drm_i915_getparam gp;
637
	int devid = 0;
638
 
639
	memset(&gp, 0, sizeof(gp));
640
	gp.param = I915_PARAM_CHIPSET_ID;
641
	gp.value = &devid;
642
 
643
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
644
		return 0;
645
 
646
	return devid;
647
}
648
 
649
int drmIoctl(int fd, unsigned long request, void *arg)
650
{
651
    ioctl_t  io;
652
 
653
    io.handle   = fd;
654
    io.io_code  = request;
655
    io.input    = arg;
656
    io.inp_size = 64;
657
    io.output   = NULL;
658
    io.out_size = 0;
659
 
660
    return call_service(&io);
661
}
662
 
663
 
664
 
665
/* Prototype for the gen6 composite setup entry point — the definition is
 * not in this file (presumably gen6_render.c; verify in the tree). */
bool
gen6_composite(struct sna *sna,
              uint8_t op,
              PixmapPtr src, struct kgem_bo *src_bo,
              PixmapPtr mask,struct kgem_bo *mask_bo,
              PixmapPtr dst, struct kgem_bo *dst_bo,
              int32_t src_x, int32_t src_y,
              int32_t msk_x, int32_t msk_y,
              int32_t dst_x, int32_t dst_y,
              int32_t width, int32_t height,
              struct sna_composite_op *tmp);
676
 
677
//#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
678
 
679
 
680
/*
 * Wrap a foreign GEM handle in a surface_t and attach it to the bitmap.
 * The surface has no CPU mapping (data == NULL). Returns 0 on success,
 * -1 on failure.
 *
 * Fix: kgem_bo_from_handle()'s result was dereferenced without a NULL
 * check, and the err_2 cleanup label was unreachable — clearly the
 * intended failure path. Route a NULL bo through err_2.
 */
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    if(bo == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}
713
 
4372 Serge 714
/* Overwrite the GEM handle of the bo backing this bitmap's surface. */
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);

    sf->bo->handle = handle;
}
720
 
721
/*
 * Allocate a 32bpp linear, CPU-mapped bo for the bitmap and attach a
 * surface_t via bitmap->handle. Returns 0 on success, -1 on failure.
 */
static int sna_create_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo = NULL;
    void *map = NULL;
    surface_t *sf = malloc(sizeof(*sf));

    if(sf == NULL)
        return -1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32,I915_TILING_NONE, CREATE_CPU_MAP);
    if(bo != NULL)
        map = kgem_bo_map(&sna_device->kgem, bo);

    if(map == NULL)
    {
        /* Either allocation or mapping failed: unwind everything. */
        if(bo != NULL)
            kgem_bo_destroy(&sna_device->kgem, bo);
        __lock_release_recursive(__sna_lock);
        free(sf);
        return -1;
    }

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;
}
763
 
4372 Serge 764
/*
 * Release the surface backing this bitmap: destroy the bo, free the
 * surface_t, and poison the bitmap's public fields so stale use of the
 * handle/data/pitch is caught quickly. Always returns 0.
 */
static int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);
    kgem_bo_destroy(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
782
 
4372 Serge 783
/*
 * Prepare the bitmap for CPU access: synchronise the bo into the CPU
 * domain and expose its mapping and pitch through the bitmap. Always
 * returns 0.
 */
static int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);
    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
}
799
 
4372 Serge 800
/*
 * Resize the surface to bitmap->width x height. If the existing bo is
 * large enough it is reused with an updated pitch; otherwise the bo is
 * destroyed and reallocated. bitmap->data/pitch are invalidated either
 * way — the caller must lock again before CPU access.
 * Returns 0 on success, -1 if reallocation fails (sf->bo is then NULL).
 */
static int sna_resize_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem *kgem = &sna_device->kgem;
    struct kgem_bo *bo = sf->bo;

    uint32_t   size;
    uint32_t   pitch;

    /* Invalidate the CPU view until the next lock. */
    bitmap->pitch = -1;
    bitmap->data = (void *) -1;

    size = kgem_surface_size(kgem,kgem->has_relaxed_fencing, CREATE_CPU_MAP,
                 bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
    assert(size && size <= kgem->max_object_size);

    if(sf->bo_size >= size)
    {
        /* Current allocation is big enough: just retarget it. */
        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->pitch   = pitch;
        bo->pitch   = pitch;

        return 0;
    }
    else
    {
        __lock_acquire_recursive(__sna_lock);

        sna_bo_destroy(kgem, bo);

        sf->bo = NULL;

        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
                            32, I915_TILING_NONE, CREATE_CPU_MAP);

        if(bo == NULL)
        {
            /* NOTE: sf->bo stays NULL; the surface is unusable now. */
            __lock_release_recursive(__sna_lock);
            return -1;
        };

        void *map = kgem_bo_map(kgem, bo);
        if(map == NULL)
        {
            sna_bo_destroy(kgem, bo);
            __lock_release_recursive(__sna_lock);
            return -1;
        };

        __lock_release_recursive(__sna_lock);

        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->data    = map;
        sf->pitch   = bo->pitch;
        sf->bo      = bo;
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    }

    return 0;
};
862
 
863
 
864
 
865
int sna_create_mask()
866
{
4372 Serge 867
    struct kgem_bo *bo;
4304 Serge 868
 
869
//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);
870
 
871
    __lock_acquire_recursive(__sna_lock);
872
 
873
    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
874
                        8,I915_TILING_NONE, CREATE_CPU_MAP);
875
 
876
    if(unlikely(bo == NULL))
877
        goto err_1;
878
 
879
    int *map = kgem_bo_map(&sna_device->kgem, bo);
880
    if(map == NULL)
881
        goto err_2;
882
 
883
    __lock_release_recursive(__sna_lock);
884
 
885
    memset(map, 0, bo->pitch * sna_fb.height);
886
 
887
    tls_set(tls_mask, bo);
888
 
889
    return 0;
890
 
891
err_2:
892
    kgem_bo_destroy(&sna_device->kgem, bo);
893
err_1:
894
    __lock_release_recursive(__sna_lock);
895
    return -1;
896
};
897
 
898
 
4368 Serge 899
 
4304 Serge 900
/*
 * Masked, optionally scaled GPU blit of a bitmap region into the screen
 * framebuffer. The per-thread 8bpp mask bo (window clip mask, updated by
 * the kernel via SRV_MASK_UPDATE) is applied during compositing; the
 * window origin is read from the process info block (offsets 34/38).
 * Waits for the scanout beam to leave the destination rectangle before
 * blitting. Returns 0 on success, -1 if no mask bo can be obtained.
 *
 * Fix: mask was cleared with sizeof(dst) instead of sizeof(mask) — both
 * are struct _Pixmap so behavior was identical, but the expression now
 * names the object it clears.
 */
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)

{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    /* Lazily create the per-thread mask on first use. */
    mask_bo = tls_get(tls_mask);

    if(unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    };

    /* Framebuffer changed (e.g. mode switch): rebuild the mask to match. */
    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    /* Ask the kernel to refresh the window clip mask contents. */
    VG_CLEAR(update);
    update.handle = mask_bo->handle;
    update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;

    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);

#if 1
    {
        /* Tear avoidance: wait until the beam leaves the target rect. */
        rect_t crtc, clip;

        crtc.l = 0;
        crtc.t = 0;
        crtc.r = sna_fb.width-1;
        crtc.b = sna_fb.height-1;

        clip.l = winx+dst_x;
        clip.t = winy+dst_y;
        clip.r = clip.l+w-1;
        clip.b = clip.t+h-1;

        kgem_set_mode(&sna_device->kgem, KGEM_RENDER, sna_fb.fb_bo);
        sna_wait_for_scanline(sna_device, &crtc, &clip);
    }
#endif

    if( sna_device->render.blit_tex(sna_device, PictOpSrc,scale,
              &src, src_bo,
              &mask, mask_bo,
              &dst, sna_fb.fb_bo,
              src_x, src_y,
              dst_x, dst_y,
              winx+dst_x, winy+dst_y,
              w, h,
              &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    };

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    /* CPU view is stale after a GPU blit; force a re-lock. */
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
1027
 
1028
 
4372 Serge 1029
static void sna_fini()
1030
{
1031
    ENTER();
4304 Serge 1032
 
4372 Serge 1033
    if( sna_device )
1034
    {
1035
        struct kgem_bo *mask;
4304 Serge 1036
 
4372 Serge 1037
        __lock_acquire_recursive(__sna_lock);
4304 Serge 1038
 
4372 Serge 1039
        mask = tls_get(tls_mask);
4304 Serge 1040
 
4372 Serge 1041
        sna_device->render.fini(sna_device);
1042
        if(mask)
1043
            kgem_bo_destroy(&sna_device->kgem, mask);
1044
//        kgem_close_batches(&sna_device->kgem);
1045
        kgem_cleanup_cache(&sna_device->kgem);
4304 Serge 1046
 
4372 Serge 1047
        sna_device = NULL;
1048
        __lock_release_recursive(__sna_lock);
1049
    };
1050
    LEAVE();
1051
}
4304 Serge 1052
 
4372 Serge 1053
/*
 * Driver entry point called by the pixlib host. On first call: queries
 * PCI info from the kernel service, allocates and initialises the global
 * struct sna, brings up kgem and the render backend, allocates the TLS
 * key for per-thread masks, and fills the driver vtable.
 * Subsequent calls skip straight to reporting capabilities.
 *
 * Returns the backend capability mask, or 0 when initialisation fails
 * (PCI query or allocation error).
 *
 * NOTE(review): driver->name is never assigned here — confirm the host
 * tolerates a NULL/uninitialised name. 'device' is static because
 * sna->PciInfo keeps pointing at it after return.
 */
uint32_t DrvInit(uint32_t service, struct pix_driver *driver)
{
    ioctl_t   io;
    int caps = 0;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    __lock_acquire_recursive(__sna_lock);

    /* Already initialised: just report caps. */
    if(sna_device)
        goto done;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io)!=0)
        goto err1;

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        goto err1;

    memset(sna, 0, sizeof(*sna));

    sna->cpu_features = sna_cpu_detect();

    sna->PciInfo = &device;
    sna->info = intel_detect_chipset(sna->PciInfo);
    sna->scrn = service;

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

    /* Disable tiling by default */
    sna->tiling = 0;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    /* Selects the gen backend and publishes sna_device. */
    sna_accel_init(sna);

    tls_mask = tls_alloc();

//    printf("tls mask %x\n", tls_mask);

    driver->create_bitmap  = sna_create_bitmap;
    driver->destroy_bitmap = sna_destroy_bitmap;
    driver->lock_bitmap    = sna_lock_bitmap;
    driver->blit           = sna_blit_tex;
    driver->resize_bitmap  = sna_resize_bitmap;
    driver->fini           = sna_fini;
done:
    caps = sna_device->render.caps;

err1:
    __lock_release_recursive(__sna_lock);

    return caps;
}
1120
 
1121
 
1122