Subversion Repositories Kolibri OS

Rev

Rev 3299 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
3254 Serge 1
 
3769 Serge 2
 
3254 Serge 3
#include 
4
#include 
3769 Serge 5
6
#include "sna.h"
3254 Serge 7
 
8
#define to_surface(x) (surface_t*)((x)->handle)
9
 
3769 Serge 10
static struct sna_fb sna_fb;
3263 Serge 11
 
12
3769 Serge 13
int tls_alloc(void);
3263 Serge 14
 
3769 Serge 15
/* Read a thread-local slot.
 *
 * key: byte offset of the slot inside the thread's TLS area (the %fs
 *      segment on this target).
 * Returns the pointer value stored at %fs:key.
 *
 * NOTE(review): the opening/closing braces of this function were lost
 * in the repository scrape and have been restored here.
 */
static inline void *tls_get(int key)
{
    void *val;
    __asm__ __volatile__(
    "movl %%fs:(%1), %0"
    :"=r"(val)
    :"r"(key));

    return val;
}
3769 Serge 25
26
/* Store @ptr into the thread-local slot at byte offset @key (%fs segment).
 *
 * Returns 0 on success, -1 if @key is not 4-byte aligned (the low two
 * bits must be clear for a valid 32-bit slot offset).
 *
 * NOTE(review): the signature line was lost in the repository scrape;
 * the name and parameter names are reconstructed from the call site
 * tls_set(tls_mask, bo) and the asm operands ("r"(ptr), "r"(key)) —
 * confirm against the upstream file.
 */
static inline int
tls_set(int key, void *ptr)
{
    if(!(key & 3))
    {
        __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        ::"r"(ptr),"r"(key));
        return 0;
    }
    else return -1;
}
38
39
3258 Serge 40
 
41
 
42
 
3266 Serge 43
 
3769 Serge 44
uint32_t kgem_surface_size(struct kgem *kgem,bool relaxed_fencing,
45
				  unsigned flags, uint32_t width, uint32_t height,
46
				  uint32_t bpp, uint32_t tiling, uint32_t *pitch);
47
48
void kgem_close_batches(struct kgem *kgem);
49
 
50
51
const struct intel_device_info *
52
 
3254 Serge 53
54
//struct kgem_bo *create_bo(bitmap_t *bitmap);
55
 
56
static bool sna_solid_cache_init(struct sna *sna);
57
 
58
struct sna *sna_device;
59
 
60
__LOCK_INIT_RECURSIVE(, __sna_lock);
61
 
3769 Serge 62
/* No-op render-state reset used by the fallback (non-accelerated)
 * backend: there is no hardware state to tear down.
 *
 * NOTE(review): the opening brace was lost in the repository scrape
 * and has been restored.
 */
static void no_render_reset(struct sna *sna)
{
	(void)sna;
}
66
67
void no_render_init(struct sna *sna)
68
 
3254 Serge 69
    struct sna_render *render = &sna->render;
70
71
    memset (render,0, sizeof (*render));
72
 
73
    render->prefer_gpu = PREFER_GPU_BLT;
74
 
75
    render->vertices = render->vertex_data;
76
 
77
78
//    render->composite = no_render_composite;
79
 
80
//    render->copy_boxes = no_render_copy_boxes;
81
 
82
83
//    render->fill_boxes = no_render_fill_boxes;
84
 
85
//    render->fill_one = no_render_fill_one;
86
//    render->clear = no_render_clear;
87
88
    render->reset = no_render_reset;
89
 
3258 Serge 90
//    render->fini = no_render_fini;
3263 Serge 91
3254 Serge 92
//    sna->kgem.context_switch = no_render_context_switch;
93
 
94
95
      if (sna->kgem.gen >= 60)
96
 
3258 Serge 97
3254 Serge 98
      sna_vertex_init(sna);
99
 
100
101
void sna_vertex_init(struct sna *sna)
102
 
103
//    pthread_mutex_init(&sna->render.lock, NULL);
104
//    pthread_cond_init(&sna->render.wait, NULL);
105
    sna->render.active = 0;
106
}
107
108
int sna_accel_init(struct sna *sna)
109
 
3291 Serge 110
    const char *backend;
3254 Serge 111
112
//    list_init(&sna->deferred_free);
113
 
114
//    list_init(&sna->active_pixmaps);
115
//    list_init(&sna->inactive_clock[0]);
116
//    list_init(&sna->inactive_clock[1]);
117
118
//    sna_accel_install_timers(sna);
119
 
120
121
 
122
 
123
124
 	if (sna->info->gen >= 0100) {
125
 
126
		if (gen7_render_init(sna))
3280 Serge 127
			backend = "IvyBridge";
3254 Serge 128
	} else if (sna->info->gen >= 060) {
3280 Serge 129
		if (gen6_render_init(sna))
3254 Serge 130
			backend = "SandyBridge";
131
	} else if (sna->info->gen >= 050) {
132
		if (gen5_render_init(sna))
3280 Serge 133
			backend = "Ironlake";
3254 Serge 134
	} else if (sna->info->gen >= 040) {
135
		if (gen4_render_init(sna))
3291 Serge 136
			backend = "Broadwater/Crestline";
3254 Serge 137
	} else if (sna->info->gen >= 030) {
138
		if (gen3_render_init(sna))
3299 Serge 139
			backend = "gen3";
3254 Serge 140
	}
3299 Serge 141
3254 Serge 142
	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
143
 
144
145
    kgem_reset(&sna->kgem);
146
 
147
//    if (!sna_solid_cache_init(sna))
148
 
149
150
    sna_device = sna;
151
 
152
153
 
154
 
3263 Serge 155
3254 Serge 156
int sna_init(uint32_t service)
157
 
158
    ioctl_t   io;
159
    int caps = 0;
160
3769 Serge 161
    static struct pci_device device;
3254 Serge 162
 
163
164
    DBG(("%s\n", __FUNCTION__));
165
 
166
    __lock_acquire_recursive(__sna_lock);
167
 
3769 Serge 168
    if(sna_device)
3254 Serge 169
 
3769 Serge 170
171
    io.handle   = service;
3291 Serge 172
 
3254 Serge 173
    io.input    = &device;
3256 Serge 174
    io.inp_size = sizeof(device);
3254 Serge 175
    io.output   = NULL;
176
    io.out_size = 0;
177
178
    if (call_service(&io)!=0)
179
 
180
3769 Serge 181
    sna = malloc(sizeof(*sna));
3291 Serge 182
 
3769 Serge 183
        goto err1;
184
185
    memset(sna, 0, sizeof(*sna));
186
 
187
    sna->PciInfo = &device;
188
 
3254 Serge 189
  	sna->info = intel_detect_chipset(sna->PciInfo);
190
 
191
    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
192
 
193
/*
3291 Serge 194
 
3254 Serge 195
                  OPTION_RELAXED_FENCING,
196
                  sna->kgem.has_relaxed_fencing)) {
197
        xf86DrvMsg(scrn->scrnIndex,
198
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
199
               "Disabling use of relaxed fencing\n");
200
        sna->kgem.has_relaxed_fencing = 0;
201
    }
202
    if (!xf86ReturnOptValBool(sna->Options,
203
                  OPTION_VMAP,
204
                  sna->kgem.has_vmap)) {
205
        xf86DrvMsg(scrn->scrnIndex,
206
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
207
               "Disabling use of vmap\n");
208
        sna->kgem.has_vmap = 0;
209
    }
210
*/
211
212
    /* Disable tiling by default */
213
 
214
215
    /* Default fail-safe value of 75 Hz */
216
 
217
218
    sna->flags = 0;
219
 
220
    sna_accel_init(sna);
221
 
3291 Serge 222
    tls_mask = tls_alloc();
223
 
3769 Serge 224
//    printf("tls mask %x\n", tls_mask);
3291 Serge 225
 
3769 Serge 226
done:
227
 
228
229
err1:
230
 
231
232
    return caps;
233
 
234
3254 Serge 235
void sna_fini()
236
 
3291 Serge 237
    if( sna_device )
238
    {
239
        struct kgem_bo *mask;
240
3769 Serge 241
        __lock_acquire_recursive(__sna_lock);
242
 
243
        mask = tls_get(tls_mask);
244
 
245
        sna_device->render.fini(sna_device);
246
 
3291 Serge 247
            kgem_bo_destroy(&sna_device->kgem, mask);
3769 Serge 248
        kgem_close_batches(&sna_device->kgem);
249
   	    kgem_cleanup_cache(&sna_device->kgem);
3291 Serge 250
251
   	    sna_device = NULL;
3769 Serge 252
 
253
    };
254
}
3291 Serge 255
256
#if 0
257
 
3254 Serge 258
static bool sna_solid_cache_init(struct sna *sna)
259
 
260
    struct sna_solid_cache *cache = &sna->render.solid_cache;
261
262
    DBG(("%s\n", __FUNCTION__));
263
 
264
    cache->cache_bo =
265
 
266
    if (!cache->cache_bo)
267
        return FALSE;
268
269
    /*
270
 
271
     * zeroth slot simplifies some of the checks.
272
     */
273
    cache->color[0] = 0xffffffff;
274
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
275
    cache->bo[0]->pitch = 4;
276
    cache->dirty = 1;
277
    cache->size = 1;
278
    cache->last = 0;
279
280
    return TRUE;
281
 
282
283
void
284
 
285
{
286
    struct sna_solid_cache *cache = &sna->render.solid_cache;
287
288
    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
289
 
290
    assert(cache->size);
291
292
    kgem_bo_write(&sna->kgem, cache->cache_bo,
293
 
294
    cache->dirty = 0;
295
    cache->last = 0;
296
}
297
298
static void
299
 
300
{
301
    struct sna_solid_cache *cache = &sna->render.solid_cache;
302
    int i;
303
304
    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
305
 
306
307
    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
308
 
309
310
    if (cache->dirty)
311
 
312
313
    for (i = 0; i < cache->size; i++) {
314
 
315
            continue;
316
317
        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
318
 
319
    }
320
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);
321
322
    DBG(("sna_render_finish_solid reset\n"));
323
 
324
    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
325
 
326
    cache->bo[0]->pitch = 4;
327
    if (force)
328
        cache->size = 1;
329
}
330
331
332
 
333
 
334
{
335
    struct sna_solid_cache *cache = &sna->render.solid_cache;
336
    int i;
337
338
    DBG(("%s: %08x\n", __FUNCTION__, color));
339
 
340
//    if ((color & 0xffffff) == 0) /* alpha only */
341
 
342
343
    if (color == 0xffffffff) {
344
 
345
        return kgem_bo_reference(cache->bo[0]);
346
    }
347
348
    if (cache->color[cache->last] == color) {
349
 
350
             cache->last, color));
351
        return kgem_bo_reference(cache->bo[cache->last]);
352
    }
353
354
    for (i = 1; i < cache->size; i++) {
355
 
356
            if (cache->bo[i] == NULL) {
357
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
358
                     i, color));
359
                goto create;
360
            } else {
361
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
362
                     i, color));
363
                goto done;
364
            }
365
        }
366
    }
367
368
    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));
369
 
370
    i = cache->size++;
371
 
372
    cache->dirty = 1;
373
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));
374
375
create:
376
 
377
                     i*sizeof(uint32_t), sizeof(uint32_t));
378
    cache->bo[i]->pitch = 4;
379
380
done:
381
 
382
    return kgem_bo_reference(cache->bo[i]);
383
}
384
385
#endif
386
 
3769 Serge 387
3254 Serge 388
 
389
 
3263 Serge 390
391
{
3254 Serge 392
 
393
    struct _Pixmap src, dst;
394
    struct kgem_bo *src_bo;
3263 Serge 395
396
    char proc_info[1024];
3254 Serge 397
 
3266 Serge 398
399
    get_proc_info(proc_info);
400
 
401
    winx = *(uint32_t*)(proc_info+34);
402
 
403
404
    memset(&src, 0, sizeof(src));
405
 
3263 Serge 406
407
    src.drawable.bitsPerPixel = 32;
3254 Serge 408
 
3263 Serge 409
    src.drawable.height = src_bitmap->height;
410
411
    dst.drawable.bitsPerPixel = 32;
3254 Serge 412
 
3263 Serge 413
    dst.drawable.height = sna_fb.height;
414
415
    memset(©, 0, sizeof(copy));
3291 Serge 416
 
3254 Serge 417
    src_bo = (struct kgem_bo*)src_bitmap->handle;
418
 
3263 Serge 419
    if( sna_device->render.copy(sna_device, GXcopy,
420
 
421
                                &dst, sna_fb.fb_bo, ©) )
422
    {
423
        copy.blt(sna_device, ©, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
424
        copy.done(sna_device, ©);
3266 Serge 425
    }
3291 Serge 426
3263 Serge 427
    kgem_submit(&sna_device->kgem);
3254 Serge 428
 
3263 Serge 429
    return 0;
3291 Serge 430
 
3769 Serge 431
//    __asm__ __volatile__("int3");
432
 
3263 Serge 433
};
3291 Serge 434
 
3254 Serge 435
/* Private per-bitmap GPU surface state. A pointer to this struct is
 * stashed in bitmap_t::handle and recovered via the to_surface()
 * macro at the top of the file.
 *
 * NOTE(review): the opening brace was lost in the repository scrape
 * and has been restored.
 */
typedef struct
{
    uint32_t        width;      /* surface width in pixels                  */
    uint32_t        height;     /* surface height in pixels                 */
    void           *data;       /* CPU mapping of the backing bo            */
    uint32_t        pitch;      /* row stride in bytes (copied from bo)     */
    struct kgem_bo *bo;         /* backing buffer object                    */
    uint32_t        bo_size;    /* mapping size: PAGE_SIZE * bo page count  */
    uint32_t        flags;      /* flags copied from bitmap_t at creation   */
}surface_t;
445
446
3280 Serge 447
 
3769 Serge 448
 
449
 
3263 Serge 450
    surface_t *sf;
451
	struct kgem_bo *bo;
3769 Serge 452
3263 Serge 453
    sf = malloc(sizeof(*sf));
3291 Serge 454
 
3769 Serge 455
        goto err_1;
456
457
    __lock_acquire_recursive(__sna_lock);
458
 
459
    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
460
 
3263 Serge 461
462
    if(bo == NULL)
3291 Serge 463
 
3263 Serge 464
3769 Serge 465
    void *map = kgem_bo_map(&sna_device->kgem, bo);
3263 Serge 466
 
467
        goto err_3;
468
3769 Serge 469
    sf->width   = bitmap->width;
3263 Serge 470
 
3769 Serge 471
    sf->data    = map;
472
    sf->pitch   = bo->pitch;
473
    sf->bo      = bo;
474
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
475
    sf->flags   = bitmap->flags;
476
477
    bitmap->handle = (uint32_t)sf;
3263 Serge 478
 
3769 Serge 479
480
    return 0;
481
 
3263 Serge 482
err_3:
483
 
3769 Serge 484
err_2:
485
    __lock_release_recursive(__sna_lock);
3263 Serge 486
    free(sf);
3769 Serge 487
err_1:
488
    return -1;
3263 Serge 489
};
3291 Serge 490
3769 Serge 491
/* Destroy the GPU surface backing @bitmap: release the bo under the
 * device lock, free the surface record, and poison the bitmap's
 * handle/data/pitch so stale use is caught early.
 * Always returns 0.
 *
 * NOTE(review): the braces were lost in the repository scrape and have
 * been restored; the data poison line mirrors the (void*)-1 convention
 * used elsewhere in this file (see sna_blit_tex) — confirm against the
 * upstream file.
 */
int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);

    kgem_bo_destroy(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
};
3263 Serge 510
/* Make @bitmap's pixels accessible to the CPU: synchronise the backing
 * bo for CPU access under the device lock, then publish the CPU
 * mapping and stride back into the bitmap.
 * Always returns 0.
 *
 * NOTE(review): the opening brace, the lock acquire (implied by the
 * paired release below) and the pitch assignment were lost in the
 * repository scrape and are reconstructed here — confirm against the
 * upstream file.
 */
int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

//    printf("%s\n", __FUNCTION__);
    __lock_acquire_recursive(__sna_lock);

    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
};
3769 Serge 526
3291 Serge 527
int sna_resize_bitmap(bitmap_t *bitmap)
528
 
3769 Serge 529
    surface_t *sf = to_surface(bitmap);
3266 Serge 530
    struct kgem *kgem = &sna_device->kgem;
3769 Serge 531
    struct kgem_bo *bo = sf->bo;
532
533
    uint32_t   size;
3266 Serge 534
 
3769 Serge 535
536
   	bitmap->pitch = -1;
537
 
538
539
	size = kgem_surface_size(kgem,kgem->has_relaxed_fencing, CREATE_CPU_MAP,
540
 
541
	assert(size && size <= kgem->max_object_size);
542
543
    if(sf->bo_size >= size)
3266 Serge 544
 
3769 Serge 545
        sf->width   = bitmap->width;
546
        sf->height  = bitmap->height;
547
        sf->pitch   = pitch;
548
        bo->pitch   = pitch;
549
550
	    return 0;
551
 
552
    else
553
    {
554
        __lock_acquire_recursive(__sna_lock);
555
556
        sna_bo_destroy(kgem, bo);
557
 
558
        sf->bo = NULL;
559
 
560
        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
561
 
562
563
        if(bo == NULL)
3266 Serge 564
 
3769 Serge 565
            __lock_release_recursive(__sna_lock);
566
            return -1;
567
        };
568
569
        void *map = kgem_bo_map(kgem, bo);
570
 
571
        {
572
            sna_bo_destroy(kgem, bo);
573
            __lock_release_recursive(__sna_lock);
574
            return -1;
575
        };
576
577
        __lock_release_recursive(__sna_lock);
578
 
579
        sf->width   = bitmap->width;
580
 
581
        sf->data    = map;
582
        sf->pitch   = bo->pitch;
583
        sf->bo      = bo;
584
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
585
    }
586
587
    return 0;
588
 
589
3266 Serge 590
591
 
3769 Serge 592
 
593
 
3278 Serge 594
	struct kgem_bo *bo;
595
596
//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);
3266 Serge 597
 
3299 Serge 598
    __lock_acquire_recursive(__sna_lock);
3278 Serge 599
 
3769 Serge 600
    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
601
 
3291 Serge 602
3278 Serge 603
    if(unlikely(bo == NULL))
604
 
3769 Serge 605
3278 Serge 606
    int *map = kgem_bo_map(&sna_device->kgem, bo);
607
 
608
        goto err_2;
609
610
    __lock_release_recursive(__sna_lock);
611
 
3769 Serge 612
    memset(map, 0, bo->pitch * sna_fb.height);
613
 
614
    tls_set(tls_mask, bo);
3278 Serge 615
 
3769 Serge 616
    return 0;
3291 Serge 617
 
3278 Serge 618
err_2:
619
 
620
err_1:
621
    __lock_release_recursive(__sna_lock);
622
    return -1;
3769 Serge 623
};
3278 Serge 624
625
3254 Serge 626
 
3278 Serge 627
 
3299 Serge 628
              uint8_t op,
629
		      PixmapPtr src, struct kgem_bo *src_bo,
630
		      PixmapPtr mask,struct kgem_bo *mask_bo,
631
		      PixmapPtr dst, struct kgem_bo *dst_bo,
632
              int32_t src_x, int32_t src_y,
633
              int32_t msk_x, int32_t msk_y,
634
              int32_t dst_x, int32_t dst_y,
635
              int32_t width, int32_t height,
636
              struct sna_composite_op *tmp);
637
638
3278 Serge 639
 
640
 
641
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
642
 
3769 Serge 643
3278 Serge 644
{
645
 
3254 Serge 646
3769 Serge 647
    struct drm_i915_mask_update update;
3254 Serge 648
 
3278 Serge 649
    struct sna_composite_op composite;
650
 
651
    struct kgem_bo *src_bo, *mask_bo;
652
    int winx, winy;
3769 Serge 653
654
    char proc_info[1024];
3254 Serge 655
 
3278 Serge 656
    get_proc_info(proc_info);
3254 Serge 657
 
3278 Serge 658
    winx = *(uint32_t*)(proc_info+34);
3254 Serge 659
 
3278 Serge 660
//    winw = *(uint32_t*)(proc_info+42)+1;
661
//    winh = *(uint32_t*)(proc_info+46)+1;
3769 Serge 662
663
    mask_bo = tls_get(tls_mask);
3278 Serge 664
 
3769 Serge 665
    if(unlikely(mask_bo == NULL))
666
 
667
        sna_create_mask();
668
        mask_bo = tls_get(tls_mask);
669
        if( mask_bo == NULL)
670
            return -1;
671
    };
672
673
    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
674
 
675
        __lock_acquire_recursive(__sna_lock);
676
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
677
        __lock_release_recursive(__sna_lock);
678
679
        sna_create_mask();
680
 
681
        if( mask_bo == NULL)
682
            return -1;
683
    }
684
685
    VG_CLEAR(update);
686
 
3291 Serge 687
	update.bo_map    = (__u32)MAP(mask_bo->map);
688
	drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
689
    mask_bo->pitch = update.bo_pitch;
690
691
    memset(&src, 0, sizeof(src));
692
 
3278 Serge 693
    memset(&mask, 0, sizeof(dst));
694
695
    src.drawable.bitsPerPixel = 32;
696
 
697
    src.drawable.width  = sf->width;
3769 Serge 698
 
699
700
    dst.drawable.bitsPerPixel = 32;
3278 Serge 701
 
702
    dst.drawable.height = sna_fb.height;
703
704
    mask.drawable.bitsPerPixel = 8;
705
 
706
    mask.drawable.height = update.height;
3291 Serge 707
708
    memset(&composite, 0, sizeof(composite));
3278 Serge 709
 
710
    src_bo = sf->bo;
711
 
3769 Serge 712
    __lock_acquire_recursive(__sna_lock);
3278 Serge 713
 
3769 Serge 714
715
 
3278 Serge 716
 
3769 Serge 717
		      &mask, mask_bo,
3299 Serge 718
		      &dst, sna_fb.fb_bo,
719
              src_x, src_y,
720
              dst_x, dst_y,
721
              winx+dst_x, winy+dst_y,
722
              w, h,
723
              &composite) )
724
    {
3278 Serge 725
	    struct sna_composite_rectangles r;
726
3299 Serge 727
	    r.src.x = src_x;
3278 Serge 728
 
729
	    r.mask.x = dst_x;
730
	    r.mask.y = dst_y;
731
		r.dst.x = winx+dst_x;
732
	    r.dst.y = winy+dst_y;
3299 Serge 733
	    r.width  = w;
3278 Serge 734
	    r.height = h;
3299 Serge 735
736
        composite.blt(sna_device, &composite, &r);
3278 Serge 737
 
738
739
    };
3769 Serge 740
 
3278 Serge 741
    kgem_submit(&sna_device->kgem);
742
 
743
    __lock_release_recursive(__sna_lock);
744
 
3769 Serge 745
    bitmap->data   = (void*)-1;
746
 
747
748
    return 0;
749
 
3299 Serge 750
3278 Serge 751
3254 Serge 752
 
753
 
754
 
755
 
756
 
3278 Serge 757
 
758
 
759
 
760
 
3254 Serge 761
};
762
763
static const struct intel_device_info intel_i915_info = {
764
 
765
};
766
static const struct intel_device_info intel_i945_info = {
767
	.gen = 031,
768
};
769
770
static const struct intel_device_info intel_g33_info = {
771
 
772
};
773
774
static const struct intel_device_info intel_i965_info = {
775
 
776
};
777
778
static const struct intel_device_info intel_g4x_info = {
779
 
780
};
781
782
static const struct intel_device_info intel_ironlake_info = {
783
 
784
};
785
786
static const struct intel_device_info intel_sandybridge_info = {
787
 
788
};
789
790
static const struct intel_device_info intel_ivybridge_info = {
791
 
792
};
793
794
static const struct intel_device_info intel_valleyview_info = {
795
 
796
};
797
798
static const struct intel_device_info intel_haswell_info = {
799
 
800
};
801
802
#define INTEL_DEVICE_MATCH(d,i) \
803
 
804
805
806
 
807
 
808
809
 
810
 
811
	INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
812
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
813
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
814
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),
815
816
	INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
817
 
818
	INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
819
	INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
820
	/* Another marketing win: Q35 is another g33 device not a gen4 part
821
	 * like its G35 brethren.
822
	 */
823
	INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),
824
825
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
826
 
827
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
828
	INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
829
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
830
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),
831
832
	INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
833
 
834
	INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
835
	INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
836
	INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
837
	INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
838
	INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),
839
840
	INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
841
 
842
843
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
844
 
845
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
846
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
847
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
848
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
849
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),
850
851
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
852
 
853
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
854
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
855
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
856
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),
857
858
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
859
 
860
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
861
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
862
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
863
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
864
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
865
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
866
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
867
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
868
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
869
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
870
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
871
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
872
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
873
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
874
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
875
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
876
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
877
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
878
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
879
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
880
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
881
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
882
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
883
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
884
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
885
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
886
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
887
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
888
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
889
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
890
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
891
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
892
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
893
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),
894
895
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
896
 
897
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
898
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),
899
900
	INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),
901
 
902
	{ 0, 0, 0 },
903
 
904
905
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
906
 
907
    while(list->device_id)
908
    {
909
        if(dev==list->device_id)
910
            return list;
911
        list++;
912
    }
913
    return NULL;
914
}
915
916
const struct intel_device_info *
917
 
918
{
919
    const struct pci_id_match *ent = NULL;
920
921
    ent = PciDevMatch(pci->device_id, intel_device_match);
922
 
923
    if(ent != NULL)
924
 
925
    else
926
        return &intel_generic_info;
927
928
#if 0
929
 
930
		if (DEVICE_ID(pci) == intel_chipsets[i].token) {
931
			name = intel_chipsets[i].name;
932
			break;
933
		}
934
	}
935
	if (name == NULL) {
936
		xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
937
		name = "unknown";
938
	} else {
939
		xf86DrvMsg(scrn->scrnIndex, from,
940
			   "Integrated Graphics Chipset: Intel(R) %s\n",
941
			   name);
942
	}
943
944
	scrn->chipset = name;
945
 
946
947
}
948
 
949
950
 
951
 
3258 Serge 952
    ioctl_t  io;
953
954
    io.handle   = fd;
3254 Serge 955
 
3258 Serge 956
    io.input    = arg;
957
    io.inp_size = 64;
958
    io.output   = NULL;
959
    io.out_size = 0;
960
961
    return call_service(&io);
3254 Serge 962
 
3258 Serge 963