Subversion Repositories: KolibriOS

Rev 3291

//#include "../bitmap.h"

#include <memory.h>
#include <malloc.h>

#include "sna.h"

#include <pixlib2.h>

static struct sna_fb sna_fb;
static struct kgem_bo *mask_bo;

static int mask_width, mask_height;

/* KolibriOS driver-control request block: this layout is what
 * sysfn 68.17 expects in ECX (see call_service below). */
typedef struct __attribute__((packed))
{
    unsigned    handle;     /* driver/service handle        */
    unsigned    io_code;    /* request code (SRV_*)         */
    void       *input;      /* input buffer                 */
    int         inp_size;   /* input buffer size, bytes     */
    void       *output;     /* output buffer                */
    int         out_size;   /* output buffer size, bytes    */
} ioctl_t;

/* Issue a driver request: KolibriOS sysfn 68.17 (driver control),
 * EAX = 68, EBX = 17, ECX = pointer to the request block. */
static int call_service(ioctl_t *io)
{
    int retval;

    asm volatile("int $0x40"
        :"=a"(retval)
        :"a"(68),"b"(17),"c"(io)
        :"memory","cc");

    return retval;
}

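#if 0
/* Usage sketch, not part of the original source: every request in
 * this file reaches the kernel through call_service() with a block
 * shaped like this. SRV_EXAMPLE is a hypothetical request code used
 * only for illustration; the real codes (SRV_GET_PCI_INFO,
 * SRV_MASK_UPDATE, ...) come from the service headers. */
static int example_request(uint32_t service, void *buf, int len)
{
    ioctl_t io;

    io.handle   = service;      /* handle obtained when the service was loaded */
    io.io_code  = SRV_EXAMPLE;  /* hypothetical request code */
    io.input    = buf;
    io.inp_size = len;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}
#endif
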
/* KolibriOS sysfn 9: fill a 1 KiB thread-information buffer.
 * ECX = -1 selects the calling thread; the window position and size
 * read elsewhere in this file sit at offsets +34, +38, +42 and +46. */
static inline void get_proc_info(char *info)
{
    __asm__ __volatile__(
    "int $0x40"
    :
    :"a"(9), "b"(info), "c"(-1));
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

static void no_render_reset(struct sna *sna)
{
        (void)sna;
}

void no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

    render->reset = no_render_reset;
//    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

    if (sna->kgem.gen >= 60)
        sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

int sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);

    backend = "no";
    no_render_init(sna);

    /* gen values are octal-coded: 0100 = gen8, 070 = gen7, ... */
    if (sna->info->gen >= 0100) {
        /* gen8+: no accelerated backend here, keep the no-op renderer */
    } else if (sna->info->gen >= 070) {
        if (gen7_render_init(sna))
            backend = "IvyBridge";
    } else if (sna->info->gen >= 060) {
        if (gen6_render_init(sna))
            backend = "SandyBridge";
    } else if (sna->info->gen >= 050) {
        if (gen5_render_init(sna))
            backend = "Ironlake";
    } else if (sna->info->gen >= 040) {
        if (gen4_render_init(sna))
            backend = "Broadwater/Crestline";
    } else if (sna->info->gen >= 030) {
        if (gen3_render_init(sna))
            backend = "gen3";
    }

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}

int sna_init(uint32_t service)
{
    ioctl_t   io;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        return 0;

    memset(sna, 0, sizeof(*sna));

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
    {
        free(sna);
        return 0;
    }

    sna->PciInfo = &device;

    sna->info = intel_detect_chipset(sna->PciInfo);

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

/*
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_RELAXED_FENCING,
                  sna->kgem.has_relaxed_fencing)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
               "Disabling use of relaxed fencing\n");
        sna->kgem.has_relaxed_fencing = 0;
    }
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_VMAP,
                  sna->kgem.has_vmap)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
               "Disabling use of vmap\n");
        sna->kgem.has_vmap = 0;
    }
*/

    /* Disable tiling by default */
    sna->tiling = SNA_TILING_DISABLE;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    sna_accel_init(sna);

    delay(10);

    /* 0 is returned on the failure paths above; on success the
     * render caps are reported to the caller */
    return sna->render.caps;
}

void sna_fini(void)
{
    if (sna_device)
    {
        sna_device->render.fini(sna_device);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        kgem_close_batches(&sna_device->kgem);
        kgem_cleanup_cache(&sna_device->kgem);
    }
}
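
#if 0
/* Usage sketch, not part of the original source: the expected
 * lifetime is one sna_init() against the loaded service handle,
 * any number of bitmap/blit calls, then sna_fini(). sna_init()
 * returns 0 on failure and the render caps otherwise. */
static void example_lifetime(uint32_t service)
{
    if (sna_init(service) != 0)
    {
        /* ... sna_create_bitmap() / sna_blit_tex() calls ... */
        sna_fini();
    }
}
#endif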

#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}



int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

//    __asm__ __volatile__("int3");

    return 0;
}
#endif

int sna_create_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        goto err_1;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL)
        goto err_2;

    bitmap->handle = (uint32_t)bo;
    bitmap->pitch  = bo->pitch;
    bitmap->data   = map;

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    return -1;
}
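
#if 0
/* Usage sketch, not part of the original source: allocate a 256x256
 * ARGB bitmap, sync it for CPU access, clear its first scanline
 * through the mapping, then release it. Assumes bitmap_t is the
 * pixlib2 surface descriptor used throughout this file. */
static void example_bitmap_usage(void)
{
    bitmap_t bm;

    bm.width  = 256;
    bm.height = 256;

    if (sna_create_bitmap(&bm) == 0)
    {
        sna_lock_bitmap(&bm);          /* sync the bo for CPU access */
        memset(bm.data, 0, bm.pitch);  /* clear one scanline: pitch bytes */
        sna_destroy_bitmap(&bm);
    }
}
#endif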

void sna_destroy_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = (struct kgem_bo *)bitmap->handle;

    kgem_bo_destroy(&sna_device->kgem, bo);
}

void sna_lock_bitmap(bitmap_t *bitmap)
{
    struct kgem_bo *bo;

    bo = (struct kgem_bo *)bitmap->handle;

    kgem_bo_sync__cpu(&sna_device->kgem, bo);
}

int sna_create_mask()
{
    struct kgem_bo *bo;
    int width, height;

//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);

    /* the mask covers the whole framebuffer; the original left
     * width and height uninitialised before using them below */
    width  = sna_fb.width;
    height = sna_fb.height;

    bo = kgem_create_2d(&sna_device->kgem, width, height,
                        8, I915_TILING_NONE, CREATE_CPU_MAP);
    if (bo == NULL)
        goto err_1;

    int *map = kgem_bo_map(&sna_device->kgem, bo);
    if (map == NULL)
        goto err_2;

    memset(map, 0, bo->pitch * height);

    mask_bo     = bo;
    mask_width  = width;
    mask_height = height;

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    return -1;
}

bool
gen6_composite(struct sna *sna,
               uint8_t op,
               PixmapPtr src, struct kgem_bo *src_bo,
               PixmapPtr mask, struct kgem_bo *mask_bo,
               PixmapPtr dst, struct kgem_bo *dst_bo,
               int32_t src_x, int32_t src_y,
               int32_t msk_x, int32_t msk_y,
               int32_t dst_x, int32_t dst_y,
               int32_t width, int32_t height,
               struct sna_composite_op *tmp);

/* kgem tags its map pointers in the low bits; strip the tag
 * before handing the address to the kernel */
#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))

int sna_blit_tex(bitmap_t *src_bitmap, int dst_x, int dst_y,
                 int w, int h, int src_x, int src_y)
{
//    box.x1 = dst_x;
//    box.y1 = dst_y;
//    box.x2 = dst_x+w;
//    box.y2 = dst_y+h;

//    cop.box(sna_device, &cop, &box);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy, winw, winh;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
    winw = *(uint32_t*)(proc_info+42)+1;
    winh = *(uint32_t*)(proc_info+46)+1;

    VG_CLEAR(update);
    update.handle = mask_bo->handle;
//    update.bo_size   = __kgem_bo_size(mask_bo);
//    update.bo_pitch  = mask_bo->pitch;
    update.bo_map = (__u32)MAP(mask_bo->map);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.blit_tex(sna_device, PictOpSrc,
                      &src, src_bo,
                      &mask, mask_bo,
                      &dst, sna_fb.fb_bo,
                      src_x, src_y,
                      dst_x, dst_y,
                      winx+dst_x, winy+dst_y,
                      w, h,
                      &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    }

    kgem_submit(&sna_device->kgem);

    return 0;
}
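
#if 0
/* Usage sketch, not part of the original source: composite a 64x64
 * region of a previously created bitmap at (10,10) in window-relative
 * coordinates. The mask must already exist (sna_create_mask()). */
static void example_blit(bitmap_t *bm)
{
    sna_blit_tex(bm, /*dst_x*/10, /*dst_y*/10,
                 /*w*/64, /*h*/64, /*src_x*/0, /*src_y*/0);
}
#endif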
/* .gen is octal-coded: the major gen in the high digit, half-gen
 * steps in the low one (e.g. 045 = G4x, 075 = Haswell / gen 7.5). */
static const struct intel_device_info intel_generic_info = {
        .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
        .gen = 030,
};
static const struct intel_device_info intel_i945_info = {
        .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
        .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
        .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
        .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
        .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
        .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
        .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
        .gen = 075,
};

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

static const struct pci_id_match intel_device_match[] = {
        INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
        /* Another marketing win: Q35 is another g33 device not a gen4 part
         * like its G35 brethren.
         */
        INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

        INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

        { 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent = NULL;
    const char *name = NULL;
    int i;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if(ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
    for (i = 0; intel_chipsets[i].name != NULL; i++) {
        if (DEVICE_ID(pci) == intel_chipsets[i].token) {
            name = intel_chipsets[i].name;
            break;
        }
    }
    if (name == NULL) {
        xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
        name = "unknown";
    } else {
        xf86DrvMsg(scrn->scrnIndex, from,
                   "Integrated Graphics Chipset: Intel(R) %s\n",
                   name);
    }

    scrn->chipset = name;
#endif
}


int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;       /* fixed-size argument block for the SRV_* requests */
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}