//#include "../bitmap.h"

#include <stdint.h>    /* uint32_t, intptr_t (may also come in via sna.h) */
#include <stdbool.h>   /* bool */
#include <memory.h>
#include <malloc.h>

#include "sna.h"

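/*
 * KolibriOS kernel ioctl interface.  System function 68.17 (driver
 * control) forwards a request block to a loaded kernel-space driver:
 * eax = 68, ebx = 17, ecx = pointer to the block.  call_service()
 * below wraps that int 0x40 entry.  A typical request, as issued by
 * sna_init() further down:
 *
 *     ioctl_t io;
 *     io.handle   = service;           // driver handle
 *     io.io_code  = SRV_GET_PCI_INFO;
 *     io.input    = &device;
 *     io.inp_size = sizeof(device);
 *     io.output   = NULL;
 *     io.out_size = 0;
 *     if (call_service(&io) != 0)      // non-zero return means failure
 *         ...
 */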
typedef struct __attribute__((packed))
{
    unsigned      handle;    /* driver handle */
    unsigned      io_code;   /* request code, e.g. SRV_GET_PCI_INFO */
    void          *input;
    int           inp_size;
    void          *output;
    int           out_size;
} ioctl_t;


static int call_service(ioctl_t *io)
{
    int retval;

    asm volatile("int $0x40"
        :"=a"(retval)
        :"a"(68),"b"(17),"c"(io)
        :"memory","cc");

    return retval;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

void sna_vertex_init(struct sna *sna);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

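/*
 * Minimal "no render" backend: everything is routed through the blitter
 * (PREFER_GPU_BLT); the composite/copy/fill hooks stay commented out and
 * only reset/flush are wired up.  It serves as the fallback while the
 * per-generation render paths are being ported.
 */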
static void no_render_reset(struct sna *sna)
{
    (void)sna;
}

void no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

    render->reset = no_render_reset;
    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

    /* gen values are octal-encoded (see the chipset table below),
     * so gen6+ is 060, not decimal 60 */
    if (sna->kgem.gen >= 060)
        sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

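/*
 * Backend selection.  info->gen is octal: the first digit is the major
 * hardware generation and the second a half-step (060 = SandyBridge/gen6,
 * 070 = IvyBridge/gen7, 075 = Haswell).  Only the SandyBridge path is
 * currently enabled in this port; the others remain commented out.
 */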
bool sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);

    backend = "no";
    no_render_init(sna);

    if (sna->info->gen >= 0100) {
/*      } else if (sna->info->gen >= 070) {
                if (gen7_render_init(sna))
                        backend = "IvyBridge";  */
    } else if (sna->info->gen >= 060) {
        if (gen6_render_init(sna))
            backend = "SandyBridge";
/*      } else if (sna->info->gen >= 050) {
                if (gen5_render_init(sna))
                        backend = "Ironlake";
        } else if (sna->info->gen >= 040) {
                if (gen4_render_init(sna))
                        backend = "Broadwater/Crestline";
        } else if (sna->info->gen >= 030) {
                if (gen3_render_init(sna))
                        backend = "gen3";
        } else if (sna->info->gen >= 020) {
                if (gen2_render_init(sna))
                        backend = "gen2"; */
    }

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    sna_device = sna;

#if 0
    {
        struct kgem_bo *screen_bo;
        bitmap_t        screen;

        screen.pitch  = 1024*4;
        screen.gaddr  = 0;
        screen.width  = 1024;
        screen.height = 768;
        screen.obj    = (void*)-1;

        screen_bo = create_bo(&screen);

        sna->render.clear(sna, &screen, screen_bo);
    }
#endif

    return true;
}

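/*
 * Driver entry point: query the card's PCI info through the service
 * ioctl, detect the chipset generation, bring up the kernel GEM layer
 * (kgem) and finally the acceleration backend.
 */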
int sna_init(uint32_t service)
{
    ioctl_t   io;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    sna = malloc(sizeof(struct sna));
    if (sna == NULL)
        return false;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0) {
        free(sna);              /* don't leak on probe failure */
        return false;
    }

    sna->PciInfo = &device;

    sna->info = intel_detect_chipset(sna->PciInfo);

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
/*
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_RELAXED_FENCING,
                  sna->kgem.has_relaxed_fencing)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
               "Disabling use of relaxed fencing\n");
        sna->kgem.has_relaxed_fencing = 0;
    }
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_VMAP,
                  sna->kgem.has_vmap)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
               "Disabling use of vmap\n");
        sna->kgem.has_vmap = 0;
    }
*/

    /* Disable tiling by default */
    sna->tiling = SNA_TILING_DISABLE;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    return sna_accel_init(sna);
}

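/*
 * Solid-color cache (currently compiled out).  A single linear bo holds
 * an array of 32-bit colors; each cached color is exposed as a proxy bo
 * over its 4-byte slot (pitch 4, i.e. a 1x1 32bpp source), so solid
 * fills can be treated like ordinary texture sources.  Slot 0 is pinned
 * to white, and dirty slots are written back in one kgem_bo_write()
 * before the cache is used.
 */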
#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif

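/*
 * 2D blit between two bitmaps.  Still a stub: the bo fields that should
 * come from the bitmap handles remain commented out, so for now only
 * the copy-op plumbing (render.copy -> blt -> done) is exercised.  Once
 * the handles are resolved, the intended call would be along the lines
 * of:
 *
 *     sna_blit_copy(dst, 0, 0, w, h, src, 0, 0);
 */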
int sna_blit_copy(uint32_t dst_bitmap, int dst_x, int dst_y,
                  int w, int h, uint32_t src_bitmap, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct kgem_bo src_bo, dst_bo;

    memset(&src_bo, 0, sizeof(src_bo));
    memset(&dst_bo, 0, sizeof(dst_bo));

//    src_bo.gaddr  = src_bitmap->gaddr;
//    src_bo.pitch  = src_bitmap->pitch;
//    src_bo.tiling = 0;

//    dst_bo.gaddr  = dst_bitmap->gaddr;
//    dst_bo.pitch  = dst_bitmap->pitch;
//    dst_bo.tiling = 0;

    memset(&copy, 0, sizeof(copy));

    sna_device->render.copy(sna_device, GXcopy, NULL, &src_bo, NULL, &dst_bo, &copy);
    copy.blt(sna_device, &copy, src_x, src_y, w, h, dst_x, dst_y);
    copy.done(sna_device, &copy);

//    _kgem_submit(&sna_device->kgem, &execbuffer);

    return 0;   /* presumed success; no error paths are reported yet */
}

/*

int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,
                  int w, int h, bitmap_t *src_bitmap, int src_x, int src_y,
                  bitmap_t *mask_bitmap)
{
    struct sna_composite_op cop;
    batchbuffer_t  execbuffer;
    BoxRec box;

    struct kgem_bo src_bo, mask_bo, dst_bo;

    memset(&cop, 0, sizeof(cop));
    memset(&execbuffer,  0, sizeof(execbuffer));
    memset(&src_bo, 0, sizeof(src_bo));
    memset(&dst_bo, 0, sizeof(dst_bo));
    memset(&mask_bo, 0, sizeof(mask_bo));

    src_bo.gaddr  = src_bitmap->gaddr;
    src_bo.pitch  = src_bitmap->pitch;
    src_bo.tiling = 0;

    dst_bo.gaddr  = dst_bitmap->gaddr;
    dst_bo.pitch  = dst_bitmap->pitch;
    dst_bo.tiling = 0;

    mask_bo.gaddr  = mask_bitmap->gaddr;
    mask_bo.pitch  = mask_bitmap->pitch;
    mask_bo.tiling = 0;

    box.x1 = dst_x;
    box.y1 = dst_y;
    box.x2 = dst_x+w;
    box.y2 = dst_y+h;

    sna_device->render.composite(sna_device, 0,
                                 src_bitmap, &src_bo,
                                 mask_bitmap, &mask_bo,
                                 dst_bitmap, &dst_bo,
                                 src_x, src_y,
                                 src_x, src_y,
                                 dst_x, dst_y,
                                 w, h, &cop);

    cop.box(sna_device, &cop, &box);
    cop.done(sna_device, &cop);

    INIT_LIST_HEAD(&execbuffer.objects);
    list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);
    list_add_tail(&mask_bitmap->obj->exec_list, &execbuffer.objects);

    _kgem_submit(&sna_device->kgem, &execbuffer);
}

*/

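/*
 * Per-generation device descriptors.  The .gen values are octal so that
 * each digit reads naturally: 030 = gen3 (915), 045 = gen4.5 (G4x),
 * 060 = gen6 (SandyBridge), 075 = gen7.5 (Haswell), and so on.
 */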
static const struct intel_device_info intel_generic_info = {
        .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
        .gen = 030,
};

static const struct intel_device_info intel_i945_info = {
        .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
        .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
        .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
        .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
        .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
        .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
        .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
        .gen = 075,
};

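/*
 * pci_id_match entry: vendor 0x8086, the given device id, any
 * subvendor/subdevice, class 0x03xxxx (display controller) with only
 * the base-class byte masked, and a pointer to the matching
 * intel_device_info stashed in match_data.
 */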
#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

static const struct pci_id_match intel_device_match[] = {

        INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
        /* Another marketing win: Q35 is another g33 device not a gen4 part
         * like its G35 brethren.
         */
        INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

        INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

        { 0, 0, 0 },
};

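/* Linear scan of the match table; the all-zero sentinel terminates it.
 * Note the comparison is exact, so the PCI_MATCH_ANY catch-all entry
 * never matches a real device id here; unknown devices are handled by
 * the explicit fallback in intel_detect_chipset() instead. */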
const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while (list->device_id)
    {
        if (dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

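/* Map the probed PCI device id to a device descriptor, falling back to
 * the generic (gen = -1) entry for anything the table does not list. */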
const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if (ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
        const char *name = NULL;
        int i;

        for (i = 0; intel_chipsets[i].name != NULL; i++) {
                if (DEVICE_ID(pci) == intel_chipsets[i].token) {
                        name = intel_chipsets[i].name;
                        break;
                }
        }
        if (name == NULL) {
                xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
                name = "unknown";
        } else {
                xf86DrvMsg(scrn->scrnIndex, from,
                           "Integrated Graphics Chipset: Intel(R) %s\n",
                           name);
        }

        scrn->chipset = name;
#endif
}

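/*
 * Minimal stand-in for libdrm's drmIoctl(): requests are forwarded to
 * the video driver through the same 68.17 service call used above.
 * Note the input size is hard-wired to 64 bytes rather than derived
 * from the request; that covers the i915 structures currently passed
 * through, but it is not a general implementation.
 */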
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}