//#include "../bitmap.h"

#include <memory.h>
#include <malloc.h>

#include "sna.h"

#include <pixlib2.h>

static struct sna_fb sna_fb;

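/* Control block for a KolibriOS driver-service request: a driver handle,
 * a service-specific io_code, and the input/output buffers with their
 * sizes.  call_service() below hands this structure to the kernel. */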
typedef struct __attribute__((packed))
{
    unsigned    handle;
    unsigned    io_code;
    void       *input;
    int         inp_size;
    void       *output;
    int         out_size;
} ioctl_t;


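/* int 0x40 is the KolibriOS system-call gate; eax = 68 (system services),
 * ebx = 17 (call driver service), ecx = pointer to the control block.
 * Returns the driver's status code: 0 means success (see sna_init). */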
static int call_service(ioctl_t *io)
{
    int retval;

    asm volatile("int $0x40"
        :"=a"(retval)
        :"a"(68),"b"(17),"c"(io)
        :"memory","cc");

    return retval;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

static void no_render_reset(struct sna *sna)
{
    (void)sna;
}

void no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

    render->reset = no_render_reset;
//    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

    if (sna->kgem.gen >= 60)
        sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

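/* Note: ->gen values are octal throughout this file; the high digit is
 * the major generation and the low digit the minor revision, so 045 is
 * gen4.5 (G4x) and 060 is gen6 (Sandy Bridge) in the tables below. */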
bool sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);

    backend = "no";
    no_render_init(sna);

    if (sna->info->gen >= 0100) {
/*      } else if (sna->info->gen >= 070) {
                if (gen7_render_init(sna))
                        backend = "IvyBridge";  */
    } else if (sna->info->gen >= 060) {
        if (gen6_render_init(sna))
            backend = "SandyBridge";
/*      } else if (sna->info->gen >= 050) {
                if (gen5_render_init(sna))
                        backend = "Ironlake";
        } else if (sna->info->gen >= 040) {
                if (gen4_render_init(sna))
                        backend = "Broadwater/Crestline";
        } else if (sna->info->gen >= 030) {
                if (gen3_render_init(sna))
                        backend = "gen3";
        } else if (sna->info->gen >= 020) {
                if (gen2_render_init(sna))
                        backend = "gen2"; */
    }

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}

int sna_init(uint32_t service)
{
    ioctl_t   io;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    sna = malloc(sizeof(struct sna));
    if (sna == NULL)
        return false;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
    {
        free(sna);
        return false;
    }

    sna->PciInfo = &device;

    sna->info = intel_detect_chipset(sna->PciInfo);

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
/*
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_RELAXED_FENCING,
                  sna->kgem.has_relaxed_fencing)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
               "Disabling use of relaxed fencing\n");
        sna->kgem.has_relaxed_fencing = 0;
    }
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_VMAP,
                  sna->kgem.has_vmap)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
               "Disabling use of vmap\n");
        sna->kgem.has_vmap = 0;
    }
*/

    /* Disable tiling by default */
    sna->tiling = SNA_TILING_DISABLE;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    return sna_accel_init(sna);
}
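
/* A minimal usage sketch (hypothetical caller, assuming a valid driver
 * service handle has already been obtained):
 *
 *     bitmap_t bm = { .width = 64, .height = 64 };
 *
 *     if (sna_init(service)) {
 *         sna_create_bitmap(&bm);                  // fills handle/pitch/data
 *         // ... draw into bm.data ...
 *         sna_blit_copy(&bm, 0, 0, 64, 64, 0, 0);  // copy to the framebuffer
 *     }
 */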

#if 0

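/* The solid-colour cache below is carried over from the upstream SNA code
 * but compiled out for now; the matching sna_solid_cache_init() call in
 * sna_accel_init() is commented out as well. */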
static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
                  cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}
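/* Look up (or allocate) a small proxy bo holding a solid colour.  White is
 * pinned in slot 0; the most recently used slot is tried first, then the
 * cache is scanned linearly before a new slot is claimed. */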
struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif
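/* Copy a w x h rectangle from a client bitmap to the screen framebuffer
 * using the generation-specific render.copy() fast path, then flush the
 * batch with kgem_submit(). */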
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if (sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy))
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, dst_x, dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

//    __asm__ __volatile__("int3");

    return 0;
}

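/* Allocate a CPU-mapped 32-bpp surface for a client bitmap.  On success
 * the kgem_bo pointer is stashed in bitmap->handle (a uint32_t, which
 * assumes a 32-bit address space; see the cast back in sna_blit_copy)
 * and the CPU mapping in bitmap->data. */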
int sna_create_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        goto err_1;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL)
        goto err_2;

    bitmap->handle = (uint32_t)bo;
    bitmap->pitch  = bo->pitch;
    bitmap->data   = map;

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    return -1;
}
/*

int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,
                 int w, int h, bitmap_t *src_bitmap, int src_x, int src_y,
                 bitmap_t *mask_bitmap)
{
    struct sna_composite_op cop;
    batchbuffer_t  execbuffer;
    BoxRec box;

    struct kgem_bo src_bo, mask_bo, dst_bo;

    memset(&cop, 0, sizeof(cop));
    memset(&execbuffer,  0, sizeof(execbuffer));
    memset(&src_bo, 0, sizeof(src_bo));
    memset(&dst_bo, 0, sizeof(dst_bo));
    memset(&mask_bo, 0, sizeof(mask_bo));

    src_bo.gaddr  = src_bitmap->gaddr;
    src_bo.pitch  = src_bitmap->pitch;
    src_bo.tiling = 0;

    dst_bo.gaddr  = dst_bitmap->gaddr;
    dst_bo.pitch  = dst_bitmap->pitch;
    dst_bo.tiling = 0;

    mask_bo.gaddr  = mask_bitmap->gaddr;
    mask_bo.pitch  = mask_bitmap->pitch;
    mask_bo.tiling = 0;

    box.x1 = dst_x;
    box.y1 = dst_y;
    box.x2 = dst_x+w;
    box.y2 = dst_y+h;

    sna_device->render.composite(sna_device, 0,
                                 src_bitmap, &src_bo,
                                 mask_bitmap, &mask_bo,
                                 dst_bitmap, &dst_bo,
                                 src_x, src_y,
                                 src_x, src_y,
                                 dst_x, dst_y,
                                 w, h, &cop);

    cop.box(sna_device, &cop, &box);
    cop.done(sna_device, &cop);

    INIT_LIST_HEAD(&execbuffer.objects);
    list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);
    list_add_tail(&mask_bitmap->obj->exec_list, &execbuffer.objects);

    _kgem_submit(&sna_device->kgem, &execbuffer);
}

*/

static const struct intel_device_info intel_generic_info = {
        .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
        .gen = 030,
};

static const struct intel_device_info intel_i945_info = {
        .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
        .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
        .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
        .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
        .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
        .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
        .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
        .gen = 075,
};

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

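/* Fields: vendor 0x8086 (Intel), device id, any subvendor/subdevice, and a
 * class match of 0x03 (display controller, masked to the base-class byte);
 * the device-info pointer travels in match_data. */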

static const struct pci_id_match intel_device_match[] = {

        INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
        /* Another marketing win: Q35 is another g33 device, not a gen4 part
         * like its G35 brethren.
         */
        INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

        INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

        { 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while (list->device_id)
    {
        if (dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

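/* Map the probed PCI device id to a device-info entry, falling back to
 * the generic (gen = -1) entry for unrecognised hardware. */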
const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent = NULL;
    const char *name = NULL;
    int i;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if (ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
        for (i = 0; intel_chipsets[i].name != NULL; i++) {
                if (DEVICE_ID(pci) == intel_chipsets[i].token) {
                        name = intel_chipsets[i].name;
                        break;
                }
        }
        if (name == NULL) {
                xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
                name = "unknown";
        } else {
                xf86DrvMsg(scrn->scrnIndex, from,
                           "Integrated Graphics Chipset: Intel(R) %s\n",
                           name);
        }

        scrn->chipset = name;
#endif
}

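/* Minimal drmIoctl() shim: forwards the request to the driver service
 * instead of a Unix ioctl.  The input size is hard-coded to 64 bytes,
 * presumably a sufficient upper bound for the drm argument structures
 * passed through this port. */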
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}