Subversion Repositories: KolibriOS

Rev 3263

//#include "../bitmap.h"

#include <memory.h>
#include <malloc.h>

#include "sna.h"

#include <pixlib2.h>

static struct sna_fb sna_fb;

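/* Request block for the KolibriOS driver-service interface: a handle to
 * the target service plus an io-control code and input/output buffers.
 * Passed to the kernel by call_service() below. */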
typedef struct __attribute__((packed))
{
    unsigned      handle;
    unsigned      io_code;
    void          *input;
    int           inp_size;
    void          *output;
    int           out_size;
} ioctl_t;


/* KolibriOS system call: function 68, subfunction 17 (driver control) */
static int call_service(ioctl_t *io)
{
    int retval;

    asm volatile("int $0x40"
        :"=a"(retval)
        :"a"(68),"b"(17),"c"(io)
        :"memory","cc");

    return retval;
}

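/* Fetch the 1 KiB thread-information block for the current thread
 * (KolibriOS system function 9 with ecx = -1). */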
static inline void get_proc_info(char *info)
{
    __asm__ __volatile__(
    "int $0x40"
    :
    :"a"(9), "b"(info), "c"(-1)
    :"memory");    /* the kernel writes into *info */
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

static void no_render_reset(struct sna *sna)
{
    (void)sna;
}

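/* Minimal render backend: all acceleration hooks except reset are left
 * unset (the no_render_* stubs are commented out in this port), so only
 * the BLT engine is used. */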
void no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

    render->reset = no_render_reset;
//    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

    if (sna->kgem.gen >= 60)
        sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

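/* Pick a render backend for the detected GPU generation. Generations are
 * encoded in octal (e.g. 060 = gen6); in this port only the SandyBridge
 * (gen6) path is enabled, everything else falls back to "no" render. */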
bool sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);

    backend = "no";
    no_render_init(sna);

    if (sna->info->gen >= 0100) {
/*  } else if (sna->info->gen >= 070) {
        if (gen7_render_init(sna))
            backend = "IvyBridge";  */
    } else if (sna->info->gen >= 060) {
        if (gen6_render_init(sna))
            backend = "SandyBridge";
/*  } else if (sna->info->gen >= 050) {
        if (gen5_render_init(sna))
            backend = "Ironlake";
    } else if (sna->info->gen >= 040) {
        if (gen4_render_init(sna))
            backend = "Broadwater/Crestline";
    } else if (sna->info->gen >= 030) {
        if (gen3_render_init(sna))
            backend = "gen3";
    } else if (sna->info->gen >= 020) {
        if (gen2_render_init(sna))
            backend = "gen2"; */
    }

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}

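/* Driver entry point: query the GPU's PCI information through the
 * service handle, detect the chipset generation, initialise kgem and
 * hand over to sna_accel_init(). */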
int sna_init(uint32_t service)
{
    ioctl_t   io;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    sna = malloc(sizeof(struct sna));
    if (sna == NULL)
        return false;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
    {
        free(sna);              /* don't leak sna on a failed service call */
        return false;
    }

    sna->PciInfo = &device;

    sna->info = intel_detect_chipset(sna->PciInfo);

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
/*
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_RELAXED_FENCING,
                  sna->kgem.has_relaxed_fencing)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
               "Disabling use of relaxed fencing\n");
        sna->kgem.has_relaxed_fencing = 0;
    }
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_VMAP,
                  sna->kgem.has_vmap)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
               "Disabling use of vmap\n");
        sna->kgem.has_vmap = 0;
    }
*/

    /* Disable tiling by default */
    sna->tiling = SNA_TILING_DISABLE;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    return sna_accel_init(sna);
}

#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif


int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

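    /* Offsets 34 and 38 in the thread-information block hold the current
     * window's x and y position; the destination is given in
     * window-relative coordinates, so translate to screen coordinates. */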
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if (sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy))
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

//    __asm__ __volatile__("int3");

    return 0;    /* the function is declared int but fell off the end */
}

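/* Allocate a 32bpp untiled GEM buffer for a bitmap and map it for CPU
 * access; the bo pointer doubles as the bitmap handle. */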
int sna_create_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        goto err_1;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL)
        goto err_2;

    bitmap->handle = (uint32_t)bo;
    bitmap->pitch  = bo->pitch;
    bitmap->data   = map;

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    return -1;
}

void sna_lock_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = (struct kgem_bo *)bitmap->handle;

    /* wait for pending GPU access before the CPU touches the pages */
    kgem_bo_sync__cpu(&sna_device->kgem, bo);
}


/*

int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y,
                 int w, int h, bitmap_t *src_bitmap, int src_x, int src_y,
                 bitmap_t *mask_bitmap)
{
    struct sna_composite_op cop;
    batchbuffer_t  execbuffer;
    BoxRec box;

    struct kgem_bo src_bo, mask_bo, dst_bo;

    memset(&cop, 0, sizeof(cop));
    memset(&execbuffer,  0, sizeof(execbuffer));
    memset(&src_bo, 0, sizeof(src_bo));
    memset(&dst_bo, 0, sizeof(dst_bo));
    memset(&mask_bo, 0, sizeof(mask_bo));

    src_bo.gaddr  = src_bitmap->gaddr;
    src_bo.pitch  = src_bitmap->pitch;
    src_bo.tiling = 0;

    dst_bo.gaddr  = dst_bitmap->gaddr;
    dst_bo.pitch  = dst_bitmap->pitch;
    dst_bo.tiling = 0;

    mask_bo.gaddr  = mask_bitmap->gaddr;
    mask_bo.pitch  = mask_bitmap->pitch;
    mask_bo.tiling = 0;

    box.x1 = dst_x;
    box.y1 = dst_y;
    box.x2 = dst_x+w;
    box.y2 = dst_y+h;

    sna_device->render.composite(sna_device, 0,
                                 src_bitmap, &src_bo,
                                 mask_bitmap, &mask_bo,
                                 dst_bitmap, &dst_bo,
                                 src_x, src_y,
                                 src_x, src_y,
                                 dst_x, dst_y,
                                 w, h, &cop);

    cop.box(sna_device, &cop, &box);
    cop.done(sna_device, &cop);

    INIT_LIST_HEAD(&execbuffer.objects);
    list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);
    list_add_tail(&mask_bitmap->obj->exec_list, &execbuffer.objects);

    _kgem_submit(&sna_device->kgem, &execbuffer);
}

*/

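/* Chipset generations are encoded in octal: the high digit is the major
 * generation and the low digit the minor step, e.g. 030 = gen3 (i915),
 * 045 = gen4.5 (G4x), 060 = gen6 (SandyBridge). */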
static const struct intel_device_info intel_generic_info = {
        .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
        .gen = 030,
};

static const struct intel_device_info intel_i945_info = {
        .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
        .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
        .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
        .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
        .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
        .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
        .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
        .gen = 075,
};

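/* Match an Intel display controller (vendor 0x8086, PCI class 0x03 in
 * bits 23:16) by device id; match_data carries the per-generation info
 * structure above. */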
#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

static const struct pci_id_match intel_device_match[] = {

        INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
        /* Another marketing win: Q35 is another g33 device not a gen4 part
         * like its G35 brethren.
         */
        INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

        INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

        { 0, 0, 0 },
};

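/* Linear scan of the match table; only the device id is compared since
 * every entry already pins the vendor to Intel. The all-zero entry
 * terminates the list. */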
const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while (list->device_id)
    {
        if (dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if (ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
        const char *name = NULL;
        int i;

        for (i = 0; intel_chipsets[i].name != NULL; i++) {
                if (DEVICE_ID(pci) == intel_chipsets[i].token) {
                        name = intel_chipsets[i].name;
                        break;
                }
        }
        if (name == NULL) {
                xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
                name = "unknown";
        } else {
                xf86DrvMsg(scrn->scrnIndex, from,
                           "Integrated Graphics Chipset: Intel(R) %s\n",
                           name);
        }

        scrn->chipset = name;
#endif
}
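/* Forward a DRM-style ioctl through the KolibriOS service interface;
 * the argument block size is passed as a fixed 64 bytes. */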
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}