KolibriOS Subversion — Rev 3278

//#include "../bitmap.h"

#include <memory.h>
#include <malloc.h>

#include "sna.h"

#include <pixlib2.h>

static struct sna_fb sna_fb;
static struct kgem_bo *mask_bo;

typedef struct __attribute__((packed))
{
    unsigned      handle;
    unsigned      io_code;
    void          *input;
    int           inp_size;
    void          *output;
    int           out_size;
} ioctl_t;


static int call_service(ioctl_t *io)
{
    int retval;

    asm volatile("int $0x40"
        :"=a"(retval)
        :"a"(68),"b"(17),"c"(io)
        :"memory","cc");

    return retval;
}
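
/* call_service() above issues a KolibriOS system call via int 0x40:
 * function 68, subfunction 17 ("call driver service") hands the ioctl_t
 * block to a kernel-mode driver and returns its status in eax. */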
  35.  
static inline void get_proc_info(char *info)
{
    __asm__ __volatile__(
    "int $0x40"
    :
    :"a"(9), "b"(info), "c"(-1)
    :"memory");   /* the kernel writes into *info */
}
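
/* Function 9 fills a 1024-byte buffer with information about a thread;
 * ecx = -1 selects the calling thread.  The blit paths below read the
 * window position from byte offsets 34/38 of this buffer and the window
 * extents from offsets 42/46. */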

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

static void no_render_reset(struct sna *sna)
{
    (void)sna;
}

void no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

    render->reset = no_render_reset;
//    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

    if (sna->kgem.gen >= 60)
        sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

bool sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);

    backend = "no";
    no_render_init(sna);

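    /* The gen comparisons below are octal, matching the .gen values in
     * the device table at the end of this file: 0100 is gen 8, 070 gen 7
     * (Ivy Bridge), 060 gen 6 (Sandy Bridge), 050 gen 5 (Ironlake). */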
    if (sna->info->gen >= 0100) {
        /* gen8+: fall through, no render backend is wired up here */
    } else if (sna->info->gen >= 070) {
        if (gen7_render_init(sna))
            backend = "IvyBridge";
    } else if (sna->info->gen >= 060) {
        if (gen6_render_init(sna))
            backend = "SandyBridge";
    } else if (sna->info->gen >= 050) {
        if (gen5_render_init(sna))
            backend = "Ironlake";
/*  } else if (sna->info->gen >= 040) {
        if (gen4_render_init(sna))
            backend = "Broadwater/Crestline";
    } else if (sna->info->gen >= 030) {
        if (gen3_render_init(sna))
            backend = "gen3";
    } else if (sna->info->gen >= 020) {
        if (gen2_render_init(sna))
            backend = "gen2"; */
    }

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}

int sna_init(uint32_t service)
{
    ioctl_t   io;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    sna = malloc(sizeof(struct sna));
    if (sna == NULL)
        return false;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
    {
        free(sna);
        return false;
    }

    sna->PciInfo = &device;

    sna->info = intel_detect_chipset(sna->PciInfo);

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);
/*
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_RELAXED_FENCING,
                  sna->kgem.has_relaxed_fencing)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
               "Disabling use of relaxed fencing\n");
        sna->kgem.has_relaxed_fencing = 0;
    }
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_VMAP,
                  sna->kgem.has_vmap)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
               "Disabling use of vmap\n");
        sna->kgem.has_vmap = 0;
    }
*/

    /* Disable tiling by default */
    sna->tiling = SNA_TILING_DISABLE;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    return sna_accel_init(sna);
}
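
/* A minimal initialisation sketch (hypothetical caller; how the display
 * driver service handle is obtained is outside this file):
 *
 *     uint32_t service = ...;     // handle of the display driver service
 *     if (!sna_init(service))
 *         return -1;              // fall back to software rendering
 */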

#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}


int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

//    __asm__ __volatile__("int3");

    return 0;
}
#endif


int sna_create_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);

    if(bo == NULL)
        goto err_1;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_2;

    bitmap->handle = (uint32_t)bo;
    bitmap->pitch  = bo->pitch;
    bitmap->data   = map;

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);

err_1:
    return -1;
}
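
/* A minimal usage sketch (hypothetical caller; assumes sna_init() has
 * already succeeded and a 32bpp surface is wanted):
 *
 *     bitmap_t bmp = { .width = 256, .height = 256 };
 *     if (sna_create_bitmap(&bmp) == 0) {
 *         sna_lock_bitmap(&bmp);                  // sync the CPU mapping
 *         memset(bmp.data, 0, bmp.pitch * 256);   // draw with the CPU
 *     }
 */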

void sna_lock_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = (struct kgem_bo *)bitmap->handle;

    kgem_bo_sync__cpu(&sna_device->kgem, bo);
}

int sna_create_mask(void)
{
    struct kgem_bo *bo;
    char proc_info[1024];
    int width, height;

    get_proc_info(proc_info);

    width  = *(uint32_t*)(proc_info+42)+1;
    height = *(uint32_t*)(proc_info+46)+1;

    printf("%s width %d height %d\n", __FUNCTION__, width, height);

    bo = kgem_create_2d(&sna_device->kgem, width, height,
                        8, I915_TILING_NONE, CREATE_CPU_MAP);

    if(bo == NULL)
        goto err_1;

    int *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_2;

    memset(map, 0, bo->pitch * height);

    mask_bo = bo;

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);

err_1:
    return -1;
}
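
/* The mask is an 8bpp buffer matching the current window extents (the +1
 * above suggests function 9 reports sizes as inclusive maxima).
 * sna_blit_tex() below uses it as the composite mask so blits are clipped
 * to the visible portion of the window. */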

bool
gen6_composite(struct sna *sna,
               uint8_t op,
               PixmapPtr src, struct kgem_bo *src_bo,
               PixmapPtr mask, struct kgem_bo *mask_bo,
               PixmapPtr dst, struct kgem_bo *dst_bo,
               int32_t src_x, int32_t src_y,
               int32_t msk_x, int32_t msk_y,
               int32_t dst_x, int32_t dst_y,
               int32_t width, int32_t height,
               struct sna_composite_op *tmp);

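/* kgem tags its map pointers with flag bits; MAP() masks off the low two
 * bits to recover the usable address (this mirrors the MAP() helper in
 * upstream SNA). */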
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))

int sna_blit_tex(bitmap_t *src_bitmap, int dst_x, int dst_y,
                 int w, int h, int src_x, int src_y)
{
//    box.x1 = dst_x;
//    box.y1 = dst_y;
//    box.x2 = dst_x+w;
//    box.y2 = dst_y+h;

//    cop.box(sna_device, &cop, &box);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy, winw, winh;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
    winw = *(uint32_t*)(proc_info+42)+1;
    winh = *(uint32_t*)(proc_info+46)+1;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = winw;
    mask.drawable.height = winh;

    memset(&composite, 0, sizeof(composite));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.blit_tex(sna_device, PictOpSrc,
                                    &src, src_bo,
                                    &mask, mask_bo,
                                    &dst, sna_fb.fb_bo,
                                    src_x, src_y,
                                    dst_x, dst_y,
                                    winx+dst_x, winy+dst_y,
                                    w, h,
                                    &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    }

    VG_CLEAR(update);
    update.handle   = mask_bo->handle;
    update.bo_size  = __kgem_bo_size(mask_bo);
    update.bo_pitch = mask_bo->pitch;
    update.bo_map   = MAP(mask_bo->map);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);

    kgem_submit(&sna_device->kgem);

    return 0;
}
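
/* After each textured blit the kernel is asked (SRV_MASK_UPDATE) to
 * refresh the window-mask contents before the batch is submitted, so the
 * blit is clipped against the current window stack; the exact semantics
 * of the ioctl live on the driver side. */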
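
/* .gen values are octal: the high digit is the major gen and the low
 * digit the half-step, so 030 is gen 3 (i915), 045 gen 4.5 (G4x) and
 * 075 gen 7.5 (Haswell); 071 marks ValleyView as a gen 7 derivative. */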
static const struct intel_device_info intel_generic_info = {
        .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
        .gen = 030,
};
static const struct intel_device_info intel_i945_info = {
        .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
        .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
        .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
        .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
        .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
        .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
        .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
        .gen = 075,
};

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

static const struct pci_id_match intel_device_match[] = {

        INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
        /* Another marketing win: Q35 is another g33 device not a gen4 part
         * like its G35 brethren.
         */
        INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

        INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

        { 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if(ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
        const char *name = NULL;
        int i;

        for (i = 0; intel_chipsets[i].name != NULL; i++) {
                if (DEVICE_ID(pci) == intel_chipsets[i].token) {
                        name = intel_chipsets[i].name;
                        break;
                }
        }
        if (name == NULL) {
                xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
                name = "unknown";
        } else {
                xf86DrvMsg(scrn->scrnIndex, from,
                           "Integrated Graphics Chipset: Intel(R) %s\n",
                           name);
        }

        scrn->chipset = name;
#endif
}

/* Minimal libdrm-style drmIoctl() shim: the request is forwarded to the
 * driver through the KolibriOS service call.  The input size is passed
 * as a fixed 64 bytes, apparently as an upper bound on the argument
 * structures used here. */
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}