Subversion Repositories Kolibri OS, Rev 3280

//#include "../bitmap.h"

#include <memory.h>
#include <malloc.h>

#include "sna.h"

#include <pixlib2.h>

static struct sna_fb sna_fb;
static struct kgem_bo *mask_bo;

static int mask_width, mask_height;

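/* Sleep for 'time' ticks via int 0x40, system function 5; in KolibriOS one
 * tick is 1/100 of a second (an assumption based on the standard syscall
 * table, not stated in this file). */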
static inline void delay(uint32_t time)
{
    __asm__ __volatile__(
    "int $0x40"
    ::"a"(5), "b"(time)
    :"memory");
}

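/* Request block for driver calls: target handle, ioctl-style code and the
 * input/output buffers. Packed so the kernel side sees exactly this byte
 * layout. */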
typedef struct __attribute__((packed))
{
    unsigned   handle;
    unsigned   io_code;
    void      *input;
    int        inp_size;
    void      *output;
    int        out_size;
} ioctl_t;

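/* Hand the request block to a driver via int 0x40, function 68.17, and
 * return the driver's status code. */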
static int call_service(ioctl_t *io)
{
    int retval;

    asm volatile("int $0x40"
        :"=a"(retval)
        :"a"(68), "b"(17), "c"(io)
        :"memory", "cc");

    return retval;
}

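/* Fetch the current thread's information block via int 0x40, function 9
 * (slot -1 selects the caller). The blit paths below read the window origin
 * and size from fixed offsets in this buffer. */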
static inline void get_proc_info(char *info)
{
    __asm__ __volatile__(
    "int $0x40"
    :
    :"a"(9), "b"(info), "c"(-1));
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

static void no_render_reset(struct sna *sna)
{
    (void)sna;
}

void no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

    render->reset = no_render_reset;
//    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

    if (sna->kgem.gen >= 60)
        sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

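/* Select a render backend for the detected chipset. Generation numbers are
 * octal major.minor (075 = gen 7.5), so the first comparison below matches
 * gen8+ parts, for which no backend exists here yet. */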
int sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);

    backend = "no";
    no_render_init(sna);

    if (sna->info->gen >= 0100) {
    } else if (sna->info->gen >= 070) {
        if (gen7_render_init(sna))
            backend = "IvyBridge";
    } else if (sna->info->gen >= 060) {
        if (gen6_render_init(sna))
            backend = "SandyBridge";
    } else if (sna->info->gen >= 050) {
        if (gen5_render_init(sna))
            backend = "Ironlake";
    } else if (sna->info->gen >= 040) {
        if (gen4_render_init(sna))
            backend = "Broadwater/Crestline";
/*  } else if (sna->info->gen >= 030) {
        if (gen3_render_init(sna))
            backend = "gen3"; */
    }

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}

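/* Top-level initialisation: allocate the sna context, fetch PCI information
 * from the kernel service, detect the chipset and bring up kgem and the
 * render backend. Returns the backend's caps value, or 0 on failure. */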
int sna_init(uint32_t service)
{
    ioctl_t io;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        return 0;

    memset(sna, 0, sizeof(*sna));

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
    {
        free(sna);
        return 0;
    }

    sna->PciInfo = &device;

    sna->info = intel_detect_chipset(sna->PciInfo);

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

    delay(10);
/*
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_RELAXED_FENCING,
                  sna->kgem.has_relaxed_fencing)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
               "Disabling use of relaxed fencing\n");
        sna->kgem.has_relaxed_fencing = 0;
    }
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_VMAP,
                  sna->kgem.has_vmap)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
               "Disabling use of vmap\n");
        sna->kgem.has_vmap = 0;
    }
*/

    /* Disable tiling by default */
    sna->tiling = SNA_TILING_DISABLE;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    sna_accel_init(sna);

    delay(10);

    return sna->render.caps;
}

void sna_fini(void)
{
    if (sna_device)
    {
        sna_device->render.fini(sna_device);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        kgem_close_batches(&sna_device->kgem);
        kgem_cleanup_cache(&sna_device->kgem);
    }
}

#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

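/* Copy a rectangle from a client bitmap to the framebuffer with the 2D
 * blitter. The destination is offset by the window origin, read from
 * offsets 34/38 of the process-information buffer (window x/y per the
 * KolibriOS thread-info layout). */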
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

//    __asm__ __volatile__("int3");

    return 0;
}
#endif

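/* Allocate a CPU-mappable, untiled 32bpp buffer for a client bitmap and
 * publish the buffer handle, pitch and mapping through the bitmap_t.
 * Returns 0 on success, -1 on failure. */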
int sna_create_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        goto err_1;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL)
        goto err_2;

    bitmap->handle = (uint32_t)bo;
    bitmap->pitch  = bo->pitch;
    bitmap->data   = map;

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    return -1;
}

void sna_destroy_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = (struct kgem_bo *)bitmap->handle;

    kgem_bo_destroy(&sna_device->kgem, bo);
}

void sna_lock_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = (struct kgem_bo *)bitmap->handle;

    kgem_bo_sync__cpu(&sna_device->kgem, bo);
}

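/* Allocate the 8bpp window mask: one alpha byte per framebuffer pixel,
 * zero-initialised. sna_blit_tex() refreshes its contents through the
 * SRV_MASK_UPDATE ioctl before each composite. */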
int sna_create_mask(void)
{
    struct kgem_bo *bo;
    int width, height;

    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);

    /* The mask covers the whole framebuffer; the original code left
     * width/height uninitialised here. */
    width  = sna_fb.width;
    height = sna_fb.height;

    bo = kgem_create_2d(&sna_device->kgem, width, height,
                        8, I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        goto err_1;

    int *map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL)
        goto err_2;

    memset(map, 0, bo->pitch * height);

    mask_bo     = bo;
    mask_width  = width;
    mask_height = height;

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    return -1;
}

bool
gen6_composite(struct sna *sna,
               uint8_t op,
               PixmapPtr src, struct kgem_bo *src_bo,
               PixmapPtr mask, struct kgem_bo *mask_bo,
               PixmapPtr dst, struct kgem_bo *dst_bo,
               int32_t src_x, int32_t src_y,
               int32_t msk_x, int32_t msk_y,
               int32_t dst_x, int32_t dst_y,
               int32_t width, int32_t height,
               struct sna_composite_op *tmp);

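/* The low bits of bo->map appear to carry flag bits (hence the ~3 mask);
 * MAP() strips them to recover the pointer. This is an assumption drawn
 * from the mask itself, not from kgem documentation. */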
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))

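/* Composite a client bitmap onto the framebuffer through the render engine,
 * masked by the 8bpp window mask so only the visible part of the window is
 * written. The mask geometry is refreshed first via SRV_MASK_UPDATE. */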
int sna_blit_tex(bitmap_t *src_bitmap, int dst_x, int dst_y,
                 int w, int h, int src_x, int src_y)
{
//    box.x1 = dst_x;
//    box.y1 = dst_y;
//    box.x2 = dst_x+w;
//    box.y2 = dst_y+h;

//    cop.box(sna_device, &cop, &box);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy, winw, winh;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
    winw = *(uint32_t*)(proc_info+42)+1;
    winh = *(uint32_t*)(proc_info+46)+1;

    VG_CLEAR(update);
    update.handle = mask_bo->handle;
//    update.bo_size   = __kgem_bo_size(mask_bo);
//    update.bo_pitch  = mask_bo->pitch;
    update.bo_map = (__u32)MAP(mask_bo->map);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.blit_tex(sna_device, PictOpSrc,
                                    &src, src_bo,
                                    &mask, mask_bo,
                                    &dst, sna_fb.fb_bo,
                                    src_x, src_y,
                                    dst_x, dst_y,
                                    winx+dst_x, winy+dst_y,
                                    w, h,
                                    &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    }

    kgem_submit(&sna_device->kgem);

    return 0;
}
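/* Per-chipset capability records. The .gen values are octal major.minor:
 * 045 is g4x (gen 4.5) and 075 is Haswell (gen 7.5). */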
static const struct intel_device_info intel_generic_info = {
        .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
        .gen = 030,
};
static const struct intel_device_info intel_i945_info = {
        .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
        .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
        .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
        .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
        .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
        .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
        .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
        .gen = 075,
};

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

static const struct pci_id_match intel_device_match[] = {

        INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
        /* Another marketing win: Q35 is another g33 device not a gen4 part
         * like its G35 brethren.
         */
        INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

        INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

        { 0, 0, 0 },
};

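/* Linear search for an exact device-id match. The trailing PCI_MATCH_ANY
 * entry is not matched here (the comparison is by equality); unknown ids
 * return NULL and the caller falls back to intel_generic_info. */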
const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if(ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
    const char *name = NULL;
    int i;

    for (i = 0; intel_chipsets[i].name != NULL; i++) {
        if (DEVICE_ID(pci) == intel_chipsets[i].token) {
            name = intel_chipsets[i].name;
            break;
        }
    }
    if (name == NULL) {
        xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
        name = "unknown";
    } else {
        xf86DrvMsg(scrn->scrnIndex, from,
                   "Integrated Graphics Chipset: Intel(R) %s\n",
                   name);
    }

    scrn->chipset = name;
#endif
}
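/* KolibriOS stand-in for libdrm's drmIoctl(): route the request through
 * call_service(). inp_size is a fixed 64 bytes rather than the real size
 * of *arg, which appears to be a shortcut of this port. */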
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}