Subversion Repositories Kolibri OS

Rev

Rev 3291 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
//#include "../bitmap.h"

/* NOTE(review): the original "#include <...>" header names were stripped by
 * an HTML-mangling export.  Restored to the conventional KolibriOS SNA
 * driver set — TODO confirm against the repository history. */
#include <memory.h>
#include <malloc.h>

#include "sna.h"

#include <pixlib2.h>

/* Framebuffer description, filled in by kgem_init_fb(). */
static struct sna_fb sna_fb;
/* 8bpp window-shape mask surface, created by sna_create_mask(). */
static struct kgem_bo *mask_bo;

/* Dimensions of mask_bo at creation time. */
static int mask_width, mask_height;
3258 Serge 15
/* Argument block for the KolibriOS driver-control system call (68.17).
 * Packed: the kernel reads this as a raw, byte-exact layout, so no
 * compiler padding may be inserted between fields. */
typedef struct __attribute__((packed))
{
  unsigned      handle;    /* driver service handle */
  unsigned      io_code;   /* driver request code (e.g. SRV_GET_PCI_INFO) */
  void          *input;    /* request payload, or NULL */
  int           inp_size;  /* payload size in bytes */
  void          *output;   /* reply buffer, or NULL */
  int           out_size;  /* reply buffer size in bytes */
}ioctl_t;
3254 Serge 24
 
3258 Serge 25
 
26
/* Dispatch one driver request through the KolibriOS syscall gate:
 * int 0x40, eax=68 (system services), ebx=17 (call driver), ecx=&io.
 * Returns the driver's status word (0 on success by convention here). */
static int call_service(ioctl_t *io)
{
  int retval;

  asm volatile("int $0x40"
      :"=a"(retval)
      :"a"(68),"b"(17),"c"(io)
      :"memory","cc");   /* driver may read/write through io; flags clobbered */

  return retval;
};
37
 
3266 Serge 38
/* KolibriOS syscall 9: copy the thread information block of the current
 * thread (slot -1) into 'info'.  Callers read the window origin from
 * fixed byte offsets of this buffer (see the blit helpers below).
 * NOTE(review): 'info' must point to a buffer of at least 1024 bytes. */
static inline void get_proc_info(char *info)
{
    __asm__ __volatile__(
    "int $0x40"
    :
    :"a"(9), "b"(info), "c"(-1));
}
45
 
3254 Serge 46
/* Resolve the intel_device_info for a probed PCI device (defined below). */
const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

/* The single global SNA device instance; set by sna_accel_init() and
 * consumed by the bitmap/blit entry points below. */
struct sna *sna_device;
54
 
3258 Serge 55
/* No-op reset hook for the fallback "no render" backend. */
static void no_render_reset(struct sna *sna)
{
	(void)sna;
}
59
 
3254 Serge 60
void no_render_init(struct sna *sna)
61
{
62
    struct sna_render *render = &sna->render;
63
 
64
    memset (render,0, sizeof (*render));
65
 
66
    render->prefer_gpu = PREFER_GPU_BLT;
67
 
68
    render->vertices = render->vertex_data;
69
    render->vertex_size = ARRAY_SIZE(render->vertex_data);
70
 
71
//    render->composite = no_render_composite;
72
 
73
//    render->copy_boxes = no_render_copy_boxes;
74
//    render->copy = no_render_copy;
75
 
76
//    render->fill_boxes = no_render_fill_boxes;
77
//    render->fill = no_render_fill;
78
//    render->fill_one = no_render_fill_one;
79
//    render->clear = no_render_clear;
80
 
3258 Serge 81
    render->reset = no_render_reset;
3263 Serge 82
//    render->flush = no_render_flush;
3254 Serge 83
//    render->fini = no_render_fini;
84
 
85
//    sna->kgem.context_switch = no_render_context_switch;
86
//    sna->kgem.retire = no_render_retire;
87
 
3258 Serge 88
      if (sna->kgem.gen >= 60)
3254 Serge 89
        sna->kgem.ring = KGEM_RENDER;
90
 
91
      sna_vertex_init(sna);
92
}
93
 
94
void sna_vertex_init(struct sna *sna)
95
{
96
//    pthread_mutex_init(&sna->render.lock, NULL);
97
//    pthread_cond_init(&sna->render.wait, NULL);
98
    sna->render.active = 0;
99
}
100
 
3291 Serge 101
/* Probe and install the newest render backend the chipset supports,
 * then bind the device to the framebuffer.
 * Returns the result of kgem_init_fb() (non-zero framebuffer handle on
 * success — TODO confirm the exact contract in kgem). */
int sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);


    /* Fallback backend first; a gen probe below may replace it. */
    backend = "no";
    no_render_init(sna);

    /* gen values are octal-encoded: 0XY = generation X, revision Y
     * (e.g. 070 = gen7 / IvyBridge, 045 = gen4.5 / G4x). */
 	if (sna->info->gen >= 0100) {
		/* gen8+: no accelerated backend in this port yet. */
	} else if (sna->info->gen >= 070) {
		if (gen7_render_init(sna))
			backend = "IvyBridge";
	} else if (sna->info->gen >= 060) {
		if (gen6_render_init(sna))
			backend = "SandyBridge";
	} else if (sna->info->gen >= 050) {
		if (gen5_render_init(sna))
			backend = "Ironlake";
	} else if (sna->info->gen >= 040) {
		if (gen4_render_init(sna))
			backend = "Broadwater/Crestline";
	} else if (sna->info->gen >= 030) {
		if (gen3_render_init(sna))
			backend = "gen3";
	}

	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
	     __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    /* Publish the device for the global entry points (sna_blit_tex etc.). */
    sna_device = sna;


    return kgem_init_fb(&sna->kgem, &sna_fb);
}
148
 
149
int sna_init(uint32_t service)
150
{
151
    ioctl_t   io;
152
 
153
    static struct pci_device device;
154
    struct sna *sna;
155
 
156
    DBG(("%s\n", __FUNCTION__));
157
 
3291 Serge 158
    sna = malloc(sizeof(*sna));
3254 Serge 159
    if (sna == NULL)
3291 Serge 160
        return 0;
3254 Serge 161
 
3291 Serge 162
    memset(sna, 0, sizeof(*sna));
163
 
3254 Serge 164
    io.handle   = service;
3256 Serge 165
    io.io_code  = SRV_GET_PCI_INFO;
3254 Serge 166
    io.input    = &device;
167
    io.inp_size = sizeof(device);
168
    io.output   = NULL;
169
    io.out_size = 0;
170
 
171
    if (call_service(&io)!=0)
3291 Serge 172
    {
173
        free(sna);
174
        return 0;
175
    };
176
 
3254 Serge 177
    sna->PciInfo = &device;
178
 
179
  	sna->info = intel_detect_chipset(sna->PciInfo);
180
 
181
    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
3291 Serge 182
 
3254 Serge 183
/*
184
    if (!xf86ReturnOptValBool(sna->Options,
185
                  OPTION_RELAXED_FENCING,
186
                  sna->kgem.has_relaxed_fencing)) {
187
        xf86DrvMsg(scrn->scrnIndex,
188
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
189
               "Disabling use of relaxed fencing\n");
190
        sna->kgem.has_relaxed_fencing = 0;
191
    }
192
    if (!xf86ReturnOptValBool(sna->Options,
193
                  OPTION_VMAP,
194
                  sna->kgem.has_vmap)) {
195
        xf86DrvMsg(scrn->scrnIndex,
196
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
197
               "Disabling use of vmap\n");
198
        sna->kgem.has_vmap = 0;
199
    }
200
*/
201
 
202
    /* Disable tiling by default */
203
    sna->tiling = SNA_TILING_DISABLE;
204
 
205
    /* Default fail-safe value of 75 Hz */
206
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;
207
 
208
    sna->flags = 0;
209
 
3291 Serge 210
    sna_accel_init(sna);
211
 
212
    delay(10);
213
 
214
    return sna->render.caps;
3254 Serge 215
}
216
 
3291 Serge 217
void sna_fini()
218
{
219
    if( sna_device )
220
    {
221
        sna_device->render.fini(sna_device);
222
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
223
        kgem_close_batches(&sna_device->kgem);
224
   	    kgem_cleanup_cache(&sna_device->kgem);
225
    };
226
}
227
 
3254 Serge 228
#if 0

/* --- Disabled (compiled-out) solid-colour cache, kept for reference --- */

/* Create the linear BO backing the solid-colour cache and seed slot 0
 * with opaque white (the most common fill colour). */
static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;   /* one 32-bit texel per row */
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

/* Write all dirty colour slots back into the cache BO in one upload. */
void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

/* Retire the current cache BO (flushing pending colours first) and
 * start a fresh one.  Skipped unless forced or the BO is GPU-busy. */
static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    /* Rebuild an empty cache; slot 0 (white) survives logically. */
    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


/* Look up (or insert) a 1x1 solid-colour BO for 'color'.
 * Returns a new reference the caller must drop. */
struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    /* Fast path: white always lives in slot 0. */
    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    /* Fast path: repeat of the most recently used colour. */
    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    /* Miss: recycle the BO if full (i == capacity forces a reset). */
    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}
3263 Serge 359
/* Blit a w*h rectangle from src_bitmap to the framebuffer at the given
 * window-relative destination (window origin is read from the thread
 * info block).  Disabled (#if 0) in this build.
 * NOTE(review): the original web export had decoded "&copy" into the
 * '©' glyph throughout this function; restored here.  Also added the
 * missing return value for the declared int return type. */
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)

{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    /* Window origin lives at fixed offsets of the thread info block. */
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;
};
3280 Serge 403
#endif
3254 Serge 404
 
3280 Serge 405
 
3263 Serge 406
/* Allocate a CPU-mappable 32bpp linear surface sized to the bitmap and
 * publish its bo pointer, pitch and CPU mapping through *bitmap.
 * Returns 0 on success, -1 on failure (bitmap left untouched). */
int sna_create_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;
    void *map;

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32,I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        return -1;

    map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL) {
        kgem_bo_destroy(&sna_device->kgem, bo);
        return -1;
    }

    /* The bo pointer doubles as the opaque handle.
     * NOTE(review): pointer stored in a 32-bit field — valid only on
     * this 32-bit target. */
    bitmap->handle = (uint32_t)bo;
    bitmap->pitch  = bo->pitch;
    bitmap->data   = map;

    return 0;
};
3266 Serge 433
 
3291 Serge 434
/* Release the kgem buffer object carried in bitmap->handle
 * (stored there by sna_create_bitmap). */
void sna_destroy_bitmap(bitmap_t *bitmap)
{
    kgem_bo_destroy(&sna_device->kgem,
                    (struct kgem_bo *)bitmap->handle);
};
443
 
3266 Serge 444
/* Make the bitmap's pixels coherent for CPU access: wait for any GPU
 * work on the underlying bo and synchronise its CPU mapping. */
void sna_lock_bitmap(bitmap_t *bitmap)
{
    kgem_bo_sync__cpu(&sna_device->kgem,
                      (struct kgem_bo *)bitmap->handle);
};
453
 
3278 Serge 454
int sna_create_mask()
455
{
456
	struct kgem_bo *bo;
457
    int width, height;
458
    int i;
3266 Serge 459
 
3299 Serge 460
//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);
3278 Serge 461
 
3291 Serge 462
    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
3278 Serge 463
                        8,I915_TILING_NONE, CREATE_CPU_MAP);
464
 
465
    if(bo == NULL)
466
        goto err_1;
467
 
468
    int *map = kgem_bo_map(&sna_device->kgem, bo);
469
    if(map == NULL)
470
        goto err_2;
471
 
472
    memset(map, 0, bo->pitch * height);
473
 
3291 Serge 474
    mask_bo     = bo;
475
    mask_width  = width;
476
    mask_height = height;
477
 
3278 Serge 478
    return 0;
479
 
480
err_2:
481
    kgem_bo_destroy(&sna_device->kgem, bo);
482
 
483
err_1:
484
    return -1;
485
 
486
};
3254 Serge 487
 
3278 Serge 488
 
3299 Serge 489
bool
490
gen6_composite(struct sna *sna,
491
              uint8_t op,
492
		      PixmapPtr src, struct kgem_bo *src_bo,
493
		      PixmapPtr mask,struct kgem_bo *mask_bo,
494
		      PixmapPtr dst, struct kgem_bo *dst_bo,
495
              int32_t src_x, int32_t src_y,
496
              int32_t msk_x, int32_t msk_y,
497
              int32_t dst_x, int32_t dst_y,
498
              int32_t width, int32_t height,
499
              struct sna_composite_op *tmp);
3278 Serge 500
 
501
 
502
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))
503
 
504
/* Composite a w*h rectangle of src_bitmap onto the framebuffer through
 * the per-window 8bpp mask, clipping the blit to the visible part of
 * the window.  Destination coordinates are window-relative; the window
 * origin is read from the thread info block.
 * Returns 0 (unconditionally).
 * FIX: the mask memset used sizeof(dst) (copy-paste; same type, so the
 * value was correct) — now spelled sizeof(mask). */
int sna_blit_tex(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)

{
    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy, winw, winh;

    get_proc_info(proc_info);

    /* Window geometry at fixed offsets of the thread info block.
     * NOTE(review): winw/winh are computed but not used below —
     * presumably intended for clipping; confirm. */
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
    winw = *(uint32_t*)(proc_info+42)+1;
    winh = *(uint32_t*)(proc_info+46)+1;

    /* Ask the kernel driver to refresh the mask contents; it returns
     * the effective mask geometry and pitch in 'update'.  Must happen
     * before mask_bo is sampled by the composite below. */
    VG_CLEAR(update);
	update.handle = mask_bo->handle;
	update.bo_map    = (__u32)MAP(mask_bo->map);
	drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = (struct kgem_bo*)src_bitmap->handle;


    if( sna_device->render.blit_tex(sna_device, PictOpSrc,
		      &src, src_bo,
		      &mask, mask_bo,
		      &dst, sna_fb.fb_bo,
              src_x, src_y,
              dst_x, dst_y,
              winx+dst_x, winy+dst_y,
              w, h,
              &composite) )
    {
	    struct sna_composite_rectangles r;

	    /* Mask coordinates are window-relative, destination is
	     * screen-absolute (window origin added). */
	    r.src.x = src_x;
	    r.src.y = src_y;
	    r.mask.x = dst_x;
	    r.mask.y = dst_y;
		r.dst.x = winx+dst_x;
	    r.dst.y = winy+dst_y;
	    r.width  = w;
	    r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    };

    kgem_submit(&sna_device->kgem);

    return 0;
}
3254 Serge 591
 
592
 
593
 
594
 
595
 
3278 Serge 596
 
597
 
598
 
599
 
3254 Serge 600
/* Per-generation device descriptors.  .gen is octal-encoded:
 * 0XY = generation X, minor revision Y (045 = gen4.5/G4x,
 * 075 = gen7.5/Haswell); -1 marks an unrecognised chipset. */
static const struct intel_device_info intel_generic_info = {
	.gen = -1,
};

static const struct intel_device_info intel_i915_info = {
	.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
	.gen = 031,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 033,
};

static const struct intel_device_info intel_i965_info = {
	.gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
	.gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
	.gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
	.gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
	.gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
	.gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
	.gen = 075,
};

/* Build a pci_id_match entry: Intel vendor (0x8086), device id 'd',
 * any subvendor/subdevice, display-class devices only (class 0x03
 * matched through the 0xff mask at bits 16-23), with the device info
 * pointer smuggled through match_data. */
#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }


/* Device-id → device-info table, terminated by a zero entry; the
 * PCI_MATCH_ANY entry before the terminator catches unknown Intel ids. */
static const struct pci_id_match intel_device_match[] = {


	INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
	/* Another marketing win: Q35 is another g33 device not a gen4 part
	 * like its G35 brethren.
	 */
	INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

	INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

	{ 0, 0, 0 },   /* terminator (device_id == 0 stops PciDevMatch) */
};
745
 
746
const struct pci_id_match *PciDevMatch(uint16_t dev,const struct pci_id_match *list)
747
{
748
    while(list->device_id)
749
    {
750
        if(dev==list->device_id)
751
            return list;
752
        list++;
753
    }
754
    return NULL;
755
}
756
 
757
const struct intel_device_info *
758
intel_detect_chipset(struct pci_device *pci)
759
{
760
    const struct pci_id_match *ent = NULL;
761
	const char *name = NULL;
762
	int i;
763
 
764
    ent = PciDevMatch(pci->device_id, intel_device_match);
765
 
766
    if(ent != NULL)
767
        return (const struct intel_device_info*)ent->match_data;
768
    else
769
        return &intel_generic_info;
770
 
771
#if 0
772
	for (i = 0; intel_chipsets[i].name != NULL; i++) {
773
		if (DEVICE_ID(pci) == intel_chipsets[i].token) {
774
			name = intel_chipsets[i].name;
775
			break;
776
		}
777
	}
778
	if (name == NULL) {
779
		xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
780
		name = "unknown";
781
	} else {
782
		xf86DrvMsg(scrn->scrnIndex, from,
783
			   "Integrated Graphics Chipset: Intel(R) %s\n",
784
			   name);
785
	}
786
 
787
	scrn->chipset = name;
788
#endif
789
 
790
}
791
 
792
 
3258 Serge 793
/* libdrm drmIoctl() shim over the KolibriOS driver-call service.
 * 'fd' is the driver service handle, 'request' the driver io_code,
 * 'arg' the in/out request structure.
 * NOTE(review): inp_size is hard-coded to 64 regardless of the real
 * size of *arg — presumably the driver side ignores it; confirm before
 * passing request structures larger than 64 bytes. */
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}
806