//#include "../bitmap.h"

#include <memory.h>   /* memset (assumed; header name lost in extraction) */
#include <malloc.h>   /* malloc (assumed; header name lost in extraction) */

#include "sna.h"


const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

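/*
 * Fallback render backend: everything is routed through the BLT ring
 * (PREFER_GPU_BLT).  The composite/fill/copy hooks left commented out
 * below are the upstream SNA no-op callbacks this port does not wire up.
 */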
void no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

//    render->reset = no_render_reset;
//    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

//    if (sna->kgem.gen >= 60)
        sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
}

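/* Upstream SNA guards the vertex state with a pthread mutex/cond pair;
 * those are stubbed out here and only the active counter is reset. */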
void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

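/*
 * Bring up acceleration: pick a render backend by hardware generation
 * (only the gen6/SandyBridge path is enabled; everything else stays on
 * the BLT-only "no" backend) and reset the kgem batch state.
 */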
bool sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);


    backend = "no";
    no_render_init(sna);

	if (sna->info->gen >= 0100) {
/*	} else if (sna->info->gen >= 070) {
		if (gen7_render_init(sna))
			backend = "IvyBridge";  */
	} else if (sna->info->gen >= 060) {
		if (gen6_render_init(sna))
			backend = "SandyBridge";
/*	} else if (sna->info->gen >= 050) {
		if (gen5_render_init(sna))
			backend = "Ironlake";
	} else if (sna->info->gen >= 040) {
		if (gen4_render_init(sna))
			backend = "Broadwater/Crestline";
	} else if (sna->info->gen >= 030) {
		if (gen3_render_init(sna))
			backend = "gen3";
	} else if (sna->info->gen >= 020) {
		if (gen2_render_init(sna))
			backend = "gen2"; */
	}

	DBG(("%s(backend=%s, prefer_gpu=%x)\n",
	     __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    sna_device = sna;
#if 0
    {
        struct kgem_bo *screen_bo;
        bitmap_t        screen;

        screen.pitch  = 1024*4;
        screen.gaddr  = 0;
        screen.width  = 1024;
        screen.height = 768;
        screen.obj    = (void*)-1;

        screen_bo = create_bo(&screen);

        sna->render.clear(sna, &screen, screen_bo);
    }
#endif

    return true;
}

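/*
 * Driver entry point: fetch the GPU's PCI configuration from the
 * KolibriOS kernel service (SRV_GET_PCI_INFO ioctl), detect the chipset
 * and initialise the kgem buffer manager before bringing up acceleration.
 */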
int sna_init(uint32_t service)
{
    ioctl_t   io;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    sna = malloc(sizeof(struct sna));
    if (sna == NULL)
        return false;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0) {
        free(sna);              /* don't leak on probe failure */
        return false;
    }

    sna->PciInfo = &device;

    sna->info = intel_detect_chipset(sna->PciInfo);

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
/*
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_RELAXED_FENCING,
                  sna->kgem.has_relaxed_fencing)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
               "Disabling use of relaxed fencing\n");
        sna->kgem.has_relaxed_fencing = 0;
    }
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_VMAP,
                  sna->kgem.has_vmap)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
               "Disabling use of vmap\n");
        sna->kgem.has_vmap = 0;
    }
*/

    /* Disable tiling by default */
    sna->tiling = SNA_TILING_DISABLE;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    return sna_accel_init(sna);
}

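/*
 * Solid-colour cache (currently compiled out): one linear bo holds an
 * array of 32-bit colours, and each cached colour is handed out as a
 * one-pixel proxy bo into that buffer, so repeated solid fills avoid a
 * fresh allocation per colour.
 */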
#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif

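/*
 * Copy a w*h rectangle between two bitmaps through the backend copy hook
 * with the GXcopy raster op.  Deriving the bos from the bitmap arguments
 * is still commented out, so zero-initialised placeholder bos are passed.
 */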
int sna_blit_copy(uint32_t dst_bitmap, int dst_x, int dst_y,
                  int w, int h, uint32_t src_bitmap, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct kgem_bo src_bo, dst_bo;

    memset(&src_bo, 0, sizeof(src_bo));
    memset(&dst_bo, 0, sizeof(dst_bo));

//    src_bo.gaddr  = src_bitmap->gaddr;
//    src_bo.pitch  = src_bitmap->pitch;
//    src_bo.tiling = 0;

//    dst_bo.gaddr  = dst_bitmap->gaddr;
//    dst_bo.pitch  = dst_bitmap->pitch;
//    dst_bo.tiling = 0;

    memset(&copy, 0, sizeof(copy));

    sna_device->render.copy(sna_device, GXcopy, NULL, &src_bo, NULL, &dst_bo, &copy);
    copy.blt(sna_device, &copy, src_x, src_y, w, h, dst_x, dst_y);
    copy.done(sna_device, &copy);

//    _kgem_submit(&sna_device->kgem, &execbuffer);

    return 0;   /* assumed success code; the original fell off the end */
}

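/*
 * Composite (textured) blit with an optional mask, kept commented out:
 * it would drive render.composite over a single box and needs a working
 * batchbuffer submission path (_kgem_submit) to run.
 */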
/*

int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,
                  int w, int h, bitmap_t *src_bitmap, int src_x, int src_y,
                  bitmap_t *mask_bitmap)
{
    struct sna_composite_op cop;
    batchbuffer_t  execbuffer;
    BoxRec box;

    struct kgem_bo src_bo, mask_bo, dst_bo;

    memset(&cop, 0, sizeof(cop));
    memset(&execbuffer,  0, sizeof(execbuffer));
    memset(&src_bo, 0, sizeof(src_bo));
    memset(&dst_bo, 0, sizeof(dst_bo));
    memset(&mask_bo, 0, sizeof(mask_bo));

    src_bo.gaddr  = src_bitmap->gaddr;
    src_bo.pitch  = src_bitmap->pitch;
    src_bo.tiling = 0;

    dst_bo.gaddr  = dst_bitmap->gaddr;
    dst_bo.pitch  = dst_bitmap->pitch;
    dst_bo.tiling = 0;

    mask_bo.gaddr  = mask_bitmap->gaddr;
    mask_bo.pitch  = mask_bitmap->pitch;
    mask_bo.tiling = 0;

    box.x1 = dst_x;
    box.y1 = dst_y;
    box.x2 = dst_x+w;
    box.y2 = dst_y+h;

    sna_device->render.composite(sna_device, 0,
                                 src_bitmap, &src_bo,
                                 mask_bitmap, &mask_bo,
                                 dst_bitmap, &dst_bo,
                                 src_x, src_y,
                                 src_x, src_y,
                                 dst_x, dst_y,
                                 w, h, &cop);

    cop.box(sna_device, &cop, &box);
    cop.done(sna_device, &cop);

    INIT_LIST_HEAD(&execbuffer.objects);
    list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);
    list_add_tail(&mask_bitmap->obj->exec_list, &execbuffer.objects);

    _kgem_submit(&sna_device->kgem, &execbuffer);
}

*/

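/*
 * Per-generation device descriptors.  The .gen values are octal literals
 * whose digits encode major.minor (030 = i915/gen3, 045 = G4X/gen4.5,
 * 075 = Haswell/gen7.5), which keeps ordered comparisons such as
 * "gen >= 060" meaningful.
 */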
static const struct intel_device_info intel_generic_info = {
	.gen = -1,
};

static const struct intel_device_info intel_i915_info = {
	.gen = 030,
};
static const struct intel_device_info intel_i945_info = {
	.gen = 031,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 033,
};

static const struct intel_device_info intel_i965_info = {
	.gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
	.gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
	.gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
	.gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
	.gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
	.gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
	.gen = 075,
};

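/*
 * Match an Intel (vendor 0x8086) device id whose PCI class is display
 * controller (0x3 << 16 under the 0xff << 16 class mask); the device-info
 * pointer travels in match_data.
 */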
#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }


static const struct pci_id_match intel_device_match[] = {

	INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
	/* Another marketing win: Q35 is another g33 device not a gen4 part
	 * like its G35 brethren.
	 */
	INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
	INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

	INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

	{ 0, 0, 0 },
};

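/* Linear scan of the match table; the all-zero sentinel terminates it. */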
const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

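/* Resolve the probed PCI device id to its info record, falling back to
 * the generic (gen = -1) descriptor for unrecognised hardware. */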
const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if (ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
	const char *name = NULL;
	int i;

	for (i = 0; intel_chipsets[i].name != NULL; i++) {
		if (DEVICE_ID(pci) == intel_chipsets[i].token) {
			name = intel_chipsets[i].name;
			break;
		}
	}
	if (name == NULL) {
		xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
		name = "unknown";
	} else {
		xf86DrvMsg(scrn->scrnIndex, from,
			   "Integrated Graphics Chipset: Intel(R) %s\n",
			   name);
	}

	scrn->chipset = name;
#endif
}