Subversion Repositories Kolibri OS

Rev

Rev 3769 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
4251 Serge 1
/**************************************************************************
3254 Serge 2
 
4251 Serge 3
Copyright 2001 VA Linux Systems Inc., Fremont, California.
4
Copyright © 2002 by David Dawes
3769 Serge 5
 
4251 Serge 6
All Rights Reserved.
7
 
8
Permission is hereby granted, free of charge, to any person obtaining a
9
copy of this software and associated documentation files (the "Software"),
10
to deal in the Software without restriction, including without limitation
11
on the rights to use, copy, modify, merge, publish, distribute, sub
12
license, and/or sell copies of the Software, and to permit persons to whom
13
the Software is furnished to do so, subject to the following conditions:
14
 
15
The above copyright notice and this permission notice (including the next
16
paragraph) shall be included in all copies or substantial portions of the
17
Software.
18
 
19
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22
THE COPYRIGHT HOLDERS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
23
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
24
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
25
USE OR OTHER DEALINGS IN THE SOFTWARE.
26
 
27
**************************************************************************/
28
 
29
/*
30
 * Authors: Jeff Hartmann 
31
 *          Abraham van der Merwe 
32
 *          David Dawes 
33
 *          Alan Hourihane 
34
 */
35
 
36
#ifdef HAVE_CONFIG_H
37
#include "config.h"
38
#endif
39
 
3254 Serge 40
#include 
41
#include 
3769 Serge 42
#include 
43
#include 
4251 Serge 44
#include "i915_pciids.h"
3254 Serge 45
 
4251 Serge 46
#include "compiler.h"
3254 Serge 47
#include "sna.h"
4251 Serge 48
#include "intel_driver.h"
3254 Serge 49
 
3769 Serge 50
#define to_surface(x) (surface_t*)((x)->handle)
3263 Serge 51
 
52
static struct sna_fb sna_fb;
3769 Serge 53
static int    tls_mask;
3263 Serge 54
 
3769 Serge 55
int tls_alloc(void);
3291 Serge 56
 
3769 Serge 57
/* Read a thread-local slot: returns the pointer stored at byte offset
 * `key` inside the FS-segment-based TLS area (x86 KolibriOS ABI).
 * `key` is the offset handed out by tls_alloc(). */
static inline void *tls_get(int key)
{
    void *val;
    __asm__ __volatile__(
    "movl %%fs:(%1), %0"
    :"=r"(val)
    :"r"(key));

  return val;
};
3258 Serge 67
 
3769 Serge 68
/* Store `ptr` into the FS-segment TLS slot at byte offset `key`.
 * The offset must be 4-byte aligned (low two bits clear).
 * Returns 0 on success, -1 if the offset is misaligned. */
static inline int
tls_set(int key, const void *ptr)
{
    if(!(key & 3))
    {
        __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        ::"r"(ptr),"r"(key));
        return 0;
    }
    else return -1;
}
3258 Serge 80
 
81
 
82
 
3266 Serge 83
 
3769 Serge 84
int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
85
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
86
uint32_t kgem_surface_size(struct kgem *kgem,bool relaxed_fencing,
87
				  unsigned flags, uint32_t width, uint32_t height,
88
				  uint32_t bpp, uint32_t tiling, uint32_t *pitch);
4251 Serge 89
struct kgem_bo *kgem_bo_from_handle(struct kgem *kgem, int handle,
90
                        int pitch, int height);
3769 Serge 91
 
92
void kgem_close_batches(struct kgem *kgem);
93
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);
94
 
3254 Serge 95
const struct intel_device_info *
96
intel_detect_chipset(struct pci_device *pci);
97
 
98
static bool sna_solid_cache_init(struct sna *sna);
99
 
100
struct sna *sna_device;
101
 
3769 Serge 102
__LOCK_INIT_RECURSIVE(, __sna_lock);
103
 
3258 Serge 104
/* No-op render-state reset for the fallback "no render" backend. */
static void no_render_reset(struct sna *sna)
{
	(void)sna;
}
108
 
4251 Serge 109
/* No-op render flush for the fallback "no render" backend. */
static void no_render_flush(struct sna *sna)
{
	(void)sna;
}
113
 
114
static void
115
no_render_context_switch(struct kgem *kgem,
116
			 int new_mode)
117
{
118
	if (!kgem->nbatch)
119
		return;
120
 
121
	if (kgem_ring_is_idle(kgem, kgem->ring)) {
122
		DBG(("%s: GPU idle, flushing\n", __FUNCTION__));
123
		_kgem_submit(kgem);
124
	}
125
 
126
	(void)new_mode;
127
}
128
 
129
/* No-op retire callback for the fallback "no render" backend. */
static void
no_render_retire(struct kgem *kgem)
{
	(void)kgem;
}
134
 
135
/* No-op expire callback for the fallback "no render" backend. */
static void
no_render_expire(struct kgem *kgem)
{
	(void)kgem;
}
140
 
141
/* No-op teardown for the fallback "no render" backend. */
static void
no_render_fini(struct sna *sna)
{
	(void)sna;
}
146
 
147
const char *no_render_init(struct sna *sna)
148
{
3254 Serge 149
    struct sna_render *render = &sna->render;
150
 
151
    memset (render,0, sizeof (*render));
152
 
153
    render->prefer_gpu = PREFER_GPU_BLT;
154
 
155
    render->vertices = render->vertex_data;
156
    render->vertex_size = ARRAY_SIZE(render->vertex_data);
157
 
3258 Serge 158
    render->reset = no_render_reset;
4251 Serge 159
	render->flush = no_render_flush;
160
	render->fini = no_render_fini;
3254 Serge 161
 
4251 Serge 162
	sna->kgem.context_switch = no_render_context_switch;
163
	sna->kgem.retire = no_render_retire;
164
	sna->kgem.expire = no_render_expire;
3254 Serge 165
 
4251 Serge 166
	sna->kgem.mode = KGEM_RENDER;
167
	sna->kgem.ring = KGEM_RENDER;
3254 Serge 168
 
4251 Serge 169
	sna_vertex_init(sna);
170
	return "generic";
171
 }
3254 Serge 172
 
173
/* Reset the vertex-buffer usage counter. The pthread primitives used by
 * the upstream driver are stubbed out on this platform. */
void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}
179
 
3291 Serge 180
/* Select and initialise the render backend for the detected GPU.
 * `gen` is octal-encoded (e.g. 060 = gen6, 075 = Haswell); gen8+ keeps
 * the fallback "no render" backend. Publishes `sna` as the global
 * device and attaches the framebuffer. Returns kgem_init_fb's result. */
int sna_accel_init(struct sna *sna)
{
    const char *backend;

	backend = no_render_init(sna);
	if (sna->info->gen >= 0100)
		(void)backend;   /* gen8+: no accelerated backend available */
	else if (sna->info->gen >= 070)
		backend = gen7_render_init(sna, backend);
	else if (sna->info->gen >= 060)
		backend = gen6_render_init(sna, backend);
	else if (sna->info->gen >= 050)
		backend = gen5_render_init(sna, backend);
	else if (sna->info->gen >= 040)
		backend = gen4_render_init(sna, backend);
	else if (sna->info->gen >= 030)
		backend = gen3_render_init(sna, backend);

	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
	     __FUNCTION__, backend, sna->render.prefer_gpu));

	kgem_reset(&sna->kgem);

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}
207
 
208
/* One-time driver initialisation for display service `service`:
 * query PCI info, detect the chipset, set up kgem and the render
 * backend, and allocate the TLS slot for the per-thread mask bo.
 * Idempotent: if a device already exists, just return its caps.
 * Returns the render caps bitmask, or 0 on failure. */
int sna_init(uint32_t service)
{
    ioctl_t   io;
    int caps = 0;

    /* static: sna->PciInfo keeps pointing at this after we return */
    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    __lock_acquire_recursive(__sna_lock);

    if(sna_device)
        goto done;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io)!=0)
        goto err1;

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        goto err1;

    memset(sna, 0, sizeof(*sna));

    sna->cpu_features = sna_cpu_detect();

    sna->PciInfo = &device;
  	sna->info = intel_detect_chipset(sna->PciInfo);
    sna->scrn = service;

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

    /* Disable tiling by default */
    sna->tiling = 0;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    /* Also publishes sna as the global sna_device (read at done:). */
    sna_accel_init(sna);

    tls_mask = tls_alloc();

//    printf("tls mask %x\n", tls_mask);

done:
    caps = sna_device->render.caps;

err1:
    __lock_release_recursive(__sna_lock);

    return caps;
}
270
 
3291 Serge 271
void sna_fini()
272
{
273
    if( sna_device )
274
    {
3769 Serge 275
        struct kgem_bo *mask;
4251 Serge 276
 
3769 Serge 277
        __lock_acquire_recursive(__sna_lock);
4251 Serge 278
 
3769 Serge 279
        mask = tls_get(tls_mask);
4251 Serge 280
 
3291 Serge 281
        sna_device->render.fini(sna_device);
3769 Serge 282
        if(mask)
283
            kgem_bo_destroy(&sna_device->kgem, mask);
4251 Serge 284
        kgem_close_batches(&sna_device->kgem);
3291 Serge 285
   	    kgem_cleanup_cache(&sna_device->kgem);
4251 Serge 286
 
3769 Serge 287
   	    sna_device = NULL;
288
        __lock_release_recursive(__sna_lock);
3291 Serge 289
    };
290
}
291
 
3254 Serge 292
#if 0
293
 
294
static bool sna_solid_cache_init(struct sna *sna)
295
{
296
    struct sna_solid_cache *cache = &sna->render.solid_cache;
297
 
298
    DBG(("%s\n", __FUNCTION__));
299
 
300
    cache->cache_bo =
301
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
302
    if (!cache->cache_bo)
303
        return FALSE;
304
 
305
    /*
306
     * Initialise [0] with white since it is very common and filling the
307
     * zeroth slot simplifies some of the checks.
308
     */
309
    cache->color[0] = 0xffffffff;
310
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
311
    cache->bo[0]->pitch = 4;
312
    cache->dirty = 1;
313
    cache->size = 1;
314
    cache->last = 0;
315
 
316
    return TRUE;
317
}
318
 
319
void
320
sna_render_flush_solid(struct sna *sna)
321
{
322
    struct sna_solid_cache *cache = &sna->render.solid_cache;
323
 
324
    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
325
    assert(cache->dirty);
326
    assert(cache->size);
327
 
328
    kgem_bo_write(&sna->kgem, cache->cache_bo,
329
              cache->color, cache->size*sizeof(uint32_t));
330
    cache->dirty = 0;
331
    cache->last = 0;
332
}
333
 
334
static void
335
sna_render_finish_solid(struct sna *sna, bool force)
336
{
337
    struct sna_solid_cache *cache = &sna->render.solid_cache;
338
    int i;
339
 
340
    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
341
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));
342
 
343
    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
344
        return;
345
 
346
    if (cache->dirty)
347
        sna_render_flush_solid(sna);
348
 
349
    for (i = 0; i < cache->size; i++) {
350
        if (cache->bo[i] == NULL)
351
            continue;
352
 
353
        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
354
        cache->bo[i] = NULL;
355
    }
356
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);
357
 
358
    DBG(("sna_render_finish_solid reset\n"));
359
 
360
    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
361
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
362
    cache->bo[0]->pitch = 4;
363
    if (force)
364
        cache->size = 1;
365
}
366
 
367
 
368
struct kgem_bo *
369
sna_render_get_solid(struct sna *sna, uint32_t color)
370
{
371
    struct sna_solid_cache *cache = &sna->render.solid_cache;
372
    int i;
373
 
374
    DBG(("%s: %08x\n", __FUNCTION__, color));
375
 
376
//    if ((color & 0xffffff) == 0) /* alpha only */
377
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);
378
 
379
    if (color == 0xffffffff) {
380
        DBG(("%s(white)\n", __FUNCTION__));
381
        return kgem_bo_reference(cache->bo[0]);
382
    }
383
 
384
    if (cache->color[cache->last] == color) {
385
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
386
             cache->last, color));
387
        return kgem_bo_reference(cache->bo[cache->last]);
388
    }
389
 
390
    for (i = 1; i < cache->size; i++) {
391
        if (cache->color[i] == color) {
392
            if (cache->bo[i] == NULL) {
393
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
394
                     i, color));
395
                goto create;
396
            } else {
397
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
398
                     i, color));
399
                goto done;
400
            }
401
        }
402
    }
403
 
404
    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));
405
 
406
    i = cache->size++;
407
    cache->color[i] = color;
408
    cache->dirty = 1;
409
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
410
 
411
create:
412
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
413
                     i*sizeof(uint32_t), sizeof(uint32_t));
414
    cache->bo[i]->pitch = 4;
415
 
416
done:
417
    cache->last = i;
418
    return kgem_bo_reference(cache->bo[i]);
419
}
420
 
3769 Serge 421
#endif
3254 Serge 422
 
423
 
3263 Serge 424
/* Blit a w x h rectangle from src_bitmap (at src_x, src_y) onto the
 * framebuffer at window-relative (dst_x, dst_y). The window origin is
 * read from the process info block so the copy lands in screen space.
 * Always returns 0; the batch is submitted before returning.
 *
 * Fix: the address-of-copy arguments had been mangled into the ©
 * character (HTML entity corruption of "&copy"); restored `&copy`. */
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)

{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    /* Window origin lives at fixed byte offsets in the info block.
     * NOTE(review): offsets 34/38 assumed per KolibriOS ABI — confirm. */
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;
};
470
 
4251 Serge 471
/* CPU-visible surface wrapper stored behind bitmap_t::handle
 * (see the to_surface() macro). */
typedef struct
{
    uint32_t        width;    /* pixels */
    uint32_t        height;   /* pixels */
    void           *data;     /* CPU mapping of bo, NULL if unmapped */
    uint32_t        pitch;    /* bytes per row */
    struct kgem_bo *bo;       /* backing buffer object */
    uint32_t        bo_size;  /* bo size in bytes (pages * PAGE_SIZE) */
    uint32_t        flags;    /* copied from bitmap_t::flags */
}surface_t;
3280 Serge 481
 
3769 Serge 482
 
483
 
3263 Serge 484
/* Allocate a 32bpp CPU-mappable surface for `bitmap` and publish it
 * through bitmap->handle. Returns 0 on success, -1 on failure (in
 * which case nothing is leaked). */
int sna_create_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;
    surface_t *sf = malloc(sizeof(*sf));

    if (sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        goto err_2;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
};
526
 
4251 Serge 527
/* Wrap an existing GEM handle in a surface_t and publish it through
 * bitmap->handle. The surface has no CPU mapping (data == NULL).
 * Returns 0 on success, -1 on failure.
 *
 * Fix: kgem_bo_from_handle's result was dereferenced (bo->pitch)
 * without a NULL check, and the err_2 label was unreachable; a failed
 * lookup now unwinds through err_2 instead of crashing. */
int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    surface_t *sf;
	struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_bo_from_handle(&sna_device->kgem, handle, bitmap->pitch, bitmap->height);
    if(bo == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
};
560
 
561
/* Overwrite the GEM handle of the bo backing `bitmap`'s surface. */
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    surface_t *sf = to_surface(bitmap);

    sf->bo->handle = handle;
}
567
 
3769 Serge 568
/* Destroy the surface behind `bitmap`: release its bo, free the
 * wrapper, and poison the bitmap fields so stale use is obvious.
 * Always returns 0. */
int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);
    kgem_bo_destroy(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    free(sf);

    /* Poison so any use-after-destroy faults immediately. */
    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
};
3266 Serge 586
 
3769 Serge 587
/* Make the surface's CPU mapping coherent with the GPU, then expose
 * the mapping through bitmap->data / bitmap->pitch. Always returns 0. */
int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);
    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);
    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
};
603
 
3769 Serge 604
/* Resize `bitmap`'s surface to bitmap->width x bitmap->height (32bpp).
 * If the existing bo is large enough for the new geometry it is reused
 * in place (only the pitch changes); otherwise the bo is destroyed and
 * a new CPU-mappable one allocated. bitmap->data/pitch are poisoned on
 * entry; callers must re-lock afterwards. Returns 0 on success, -1 on
 * allocation/map failure.
 * NOTE(review): on the reallocation failure paths sf->bo stays NULL
 * while sf->width/height keep stale values — confirm callers treat the
 * surface as dead after a -1 return. */
int sna_resize_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem *kgem = &sna_device->kgem;
    struct kgem_bo *bo = sf->bo;

    uint32_t   size;
    uint32_t   pitch;

   	bitmap->pitch = -1;
    bitmap->data = (void *) -1;

	size = kgem_surface_size(kgem,kgem->has_relaxed_fencing, CREATE_CPU_MAP,
				 bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
	assert(size && size <= kgem->max_object_size);

    if(sf->bo_size >= size)
    {
        /* Existing bo is big enough: reuse it with the new geometry. */
        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->pitch   = pitch;
        bo->pitch   = pitch;

	    return 0;
    }
    else
    {
        /* Too small: replace the bo with a freshly sized allocation. */
        __lock_acquire_recursive(__sna_lock);

        sna_bo_destroy(kgem, bo);

        sf->bo = NULL;

        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
                            32, I915_TILING_NONE, CREATE_CPU_MAP);

        if(bo == NULL)
        {
            __lock_release_recursive(__sna_lock);
            return -1;
        };

        void *map = kgem_bo_map(kgem, bo);
        if(map == NULL)
        {
            sna_bo_destroy(kgem, bo);
            __lock_release_recursive(__sna_lock);
            return -1;
        };

        __lock_release_recursive(__sna_lock);

        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->data    = map;
        sf->pitch   = bo->pitch;
        sf->bo      = bo;
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    }

    return 0;
};
666
 
3769 Serge 667
 
668
 
3278 Serge 669
int sna_create_mask()
670
{
671
	struct kgem_bo *bo;
3266 Serge 672
 
3299 Serge 673
//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);
4251 Serge 674
 
3769 Serge 675
    __lock_acquire_recursive(__sna_lock);
4251 Serge 676
 
3291 Serge 677
    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
3278 Serge 678
                        8,I915_TILING_NONE, CREATE_CPU_MAP);
4251 Serge 679
 
3769 Serge 680
    if(unlikely(bo == NULL))
3278 Serge 681
        goto err_1;
4251 Serge 682
 
3278 Serge 683
    int *map = kgem_bo_map(&sna_device->kgem, bo);
684
    if(map == NULL)
685
        goto err_2;
4251 Serge 686
 
3769 Serge 687
    __lock_release_recursive(__sna_lock);
4251 Serge 688
 
3769 Serge 689
    memset(map, 0, bo->pitch * sna_fb.height);
4251 Serge 690
 
3769 Serge 691
    tls_set(tls_mask, bo);
4251 Serge 692
 
3278 Serge 693
    return 0;
4251 Serge 694
 
3278 Serge 695
err_2:
696
    kgem_bo_destroy(&sna_device->kgem, bo);
697
err_1:
3769 Serge 698
    __lock_release_recursive(__sna_lock);
4251 Serge 699
    return -1;
3278 Serge 700
};
3254 Serge 701
 
3278 Serge 702
 
3299 Serge 703
bool
704
gen6_composite(struct sna *sna,
705
              uint8_t op,
706
		      PixmapPtr src, struct kgem_bo *src_bo,
707
		      PixmapPtr mask,struct kgem_bo *mask_bo,
4251 Serge 708
		      PixmapPtr dst, struct kgem_bo *dst_bo,
3299 Serge 709
              int32_t src_x, int32_t src_y,
710
              int32_t msk_x, int32_t msk_y,
711
              int32_t dst_x, int32_t dst_y,
712
              int32_t width, int32_t height,
713
              struct sna_composite_op *tmp);
3278 Serge 714
 
715
 
716
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
717
 
3769 Serge 718
/* Composite `bitmap`'s surface onto the framebuffer through the
 * per-thread 8bpp window mask, so the blit is clipped to the visible
 * window shape. (Re)creates the mask if missing or if the framebuffer
 * changed, refreshes the mask contents via SRV_MASK_UPDATE, then runs
 * the backend's blit_tex composite. Poisons bitmap->data/pitch before
 * returning. Returns 0 on success, -1 if no mask could be created.
 *
 * Fix: the mask pixmap was cleared with sizeof(dst) instead of
 * sizeof(mask) — harmless only because both are struct _Pixmap; now
 * uses the right object. */
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)

{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    /* Window origin from the process info block (screen coordinates). */
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    mask_bo = tls_get(tls_mask);

    if(unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    };

    /* Framebuffer changed (e.g. mode switch): the old mask geometry is
     * stale, so rebuild it. */
    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    /* Ask the kernel to refresh the mask contents for this window. */
    VG_CLEAR(update);
	update.handle = mask_bo->handle;
	update.bo_map = (int)kgem_bo_map__cpu(&sna_device->kgem, mask_bo);
	drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;

    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);


    if( sna_device->render.blit_tex(sna_device, PictOpSrc,scale,
		      &src, src_bo,
		      &mask, mask_bo,
		      &dst, sna_fb.fb_bo,
              src_x, src_y,
              dst_x, dst_y,
              winx+dst_x, winy+dst_y,
              w, h,
              &composite) )
    {
	    struct sna_composite_rectangles r;

	    r.src.x = src_x;
	    r.src.y = src_y;
	    r.mask.x = dst_x;
	    r.mask.y = dst_y;
		r.dst.x = winx+dst_x;
	    r.dst.y = winy+dst_y;
	    r.width  = w;
	    r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);

    };

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}
3254 Serge 827
 
828
 
829
 
830
 
831
 
3278 Serge 832
 
833
 
3254 Serge 834
/* Per-chipset generation descriptors. `gen` is octal-encoded: the high
 * digit is the major generation, the low digit a sub-step (e.g. 045 =
 * G4x, 075 = Haswell). -1 marks an unknown/generic device. */
static const struct intel_device_info intel_generic_info = {
	.gen = -1,
};

static const struct intel_device_info intel_i915_info = {
	.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
	.gen = 031,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 033,
};

static const struct intel_device_info intel_i965_info = {
	.gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
	.gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
	.gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
	.gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
	.gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
	.gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
	.gen = 075,
};
876
 
877
/* One PCI match row: vendor 0x8086, the given device id, any
 * sub-vendor/sub-device, display class (0x3 << 16 under mask
 * 0xff << 16); match_data points at the generation descriptor. */
#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }


/* Device-id -> intel_device_info table (macros from i915_pciids.h),
 * terminated by an all-zero row. The trailing PCI_MATCH_ANY entry maps
 * any other Intel VGA device to the generic descriptor. */
static const struct pci_id_match intel_device_match[] = {

	INTEL_I915G_IDS(&intel_i915_info),
	INTEL_I915GM_IDS(&intel_i915_info),
	INTEL_I945G_IDS(&intel_i945_info),
	INTEL_I945GM_IDS(&intel_i945_info),

	INTEL_G33_IDS(&intel_g33_info),
	INTEL_PINEVIEW_IDS(&intel_g33_info),

	INTEL_I965G_IDS(&intel_i965_info),
	INTEL_I965GM_IDS(&intel_i965_info),

	INTEL_G45_IDS(&intel_g4x_info),
	INTEL_GM45_IDS(&intel_g4x_info),

	INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),

	INTEL_SNB_D_IDS(&intel_sandybridge_info),
	INTEL_SNB_M_IDS(&intel_sandybridge_info),

	INTEL_IVB_D_IDS(&intel_ivybridge_info),
	INTEL_IVB_M_IDS(&intel_ivybridge_info),

	INTEL_HSW_D_IDS(&intel_haswell_info),
	INTEL_HSW_M_IDS(&intel_haswell_info),

	INTEL_VLV_D_IDS(&intel_valleyview_info),
	INTEL_VLV_M_IDS(&intel_valleyview_info),

	INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),

	{ 0, 0, 0 },
};
916
 
917
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
918
{
919
    while(list->device_id)
920
    {
921
        if(dev==list->device_id)
922
            return list;
923
        list++;
924
    }
925
    return NULL;
926
}
927
 
928
const struct intel_device_info *
929
intel_detect_chipset(struct pci_device *pci)
930
{
931
    const struct pci_id_match *ent = NULL;
932
 
4251 Serge 933
    ent = PciDevMatch(pci->device_id, intel_device_match);
934
 
3254 Serge 935
    if(ent != NULL)
936
        return (const struct intel_device_info*)ent->match_data;
4251 Serge 937
    else
3254 Serge 938
        return &intel_generic_info;
4251 Serge 939
 
940
#if 0
3254 Serge 941
	for (i = 0; intel_chipsets[i].name != NULL; i++) {
942
		if (DEVICE_ID(pci) == intel_chipsets[i].token) {
943
			name = intel_chipsets[i].name;
944
			break;
945
		}
946
	}
947
	if (name == NULL) {
948
		xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
949
		name = "unknown";
950
	} else {
951
		xf86DrvMsg(scrn->scrnIndex, from,
952
			   "Integrated Graphics Chipset: Intel(R) %s\n",
953
			   name);
954
	}
955
 
956
	scrn->chipset = name;
957
#endif
4251 Serge 958
 
3254 Serge 959
}
960
 
4251 Serge 961
int intel_get_device_id(int fd)
962
{
963
	struct drm_i915_getparam gp;
964
	int devid = 0;
3254 Serge 965
 
4251 Serge 966
	memset(&gp, 0, sizeof(gp));
967
	gp.param = I915_PARAM_CHIPSET_ID;
968
	gp.value = &devid;
969
 
970
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
971
		return 0;
972
 
973
	return devid;
974
}
975
 
3258 Serge 976
/* libdrm-style drmIoctl shim: forwards `request` with payload `arg`
 * to the display service via call_service(). Returns the service's
 * status (0 on success).
 * NOTE(review): inp_size is hard-coded to 64 regardless of the actual
 * payload size — confirm every ioctl struct used here fits in 64
 * bytes. */
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}
989