#include <memory.h>
#include <malloc.h>
#include <kos32sys.h>
#include <pixlib2.h>

#include "sna.h"

#define to_surface(x) (surface_t*)((x)->handle)

static struct sna_fb sna_fb;
static int    tls_mask;

int tls_alloc(void);

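/*
 * Thread-local storage helpers. Per-thread slots are addressed through the
 * %fs segment: tls_get()/tls_set() read and write the 4-byte slot at the
 * given key (offset), and tls_set() only accepts keys aligned to 4 bytes.
 * tls_alloc() (implemented elsewhere) returns the key of a fresh slot.
 */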
static inline void *tls_get(int key)
{
    void *val;
    __asm__ __volatile__(
    "movl %%fs:(%1), %0"
    :"=r"(val)
    :"r"(key));

    return val;
}

static inline int
tls_set(int key, const void *ptr)
{
    if(!(key & 3))
    {
        __asm__ __volatile__(
        "movl %0, %%fs:(%1)"
        ::"r"(ptr),"r"(key));
        return 0;
    }
    else return -1;
}

int kgem_init_fb(struct kgem *kgem, struct sna_fb *fb);
int kgem_update_fb(struct kgem *kgem, struct sna_fb *fb);
uint32_t kgem_surface_size(struct kgem *kgem, bool relaxed_fencing,
                           unsigned flags, uint32_t width, uint32_t height,
                           uint32_t bpp, uint32_t tiling, uint32_t *pitch);

void kgem_close_batches(struct kgem *kgem);
void sna_bo_destroy(struct kgem *kgem, struct kgem_bo *bo);

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci);

//struct kgem_bo *create_bo(bitmap_t *bitmap);

static bool sna_solid_cache_init(struct sna *sna);

struct sna *sna_device;

__LOCK_INIT_RECURSIVE(, __sna_lock);

static void no_render_reset(struct sna *sna)
{
    (void)sna;
}

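/*
 * Minimal fallback render backend: only the BLT engine is preferred and all
 * composite/fill hooks are left unset. It stays in place unless a
 * generation-specific backend is installed by sna_accel_init().
 */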
void no_render_init(struct sna *sna)
{
    struct sna_render *render = &sna->render;

    memset(render, 0, sizeof(*render));

    render->prefer_gpu = PREFER_GPU_BLT;

    render->vertices = render->vertex_data;
    render->vertex_size = ARRAY_SIZE(render->vertex_data);

//    render->composite = no_render_composite;

//    render->copy_boxes = no_render_copy_boxes;
//    render->copy = no_render_copy;

//    render->fill_boxes = no_render_fill_boxes;
//    render->fill = no_render_fill;
//    render->fill_one = no_render_fill_one;
//    render->clear = no_render_clear;

    render->reset = no_render_reset;
//    render->flush = no_render_flush;
//    render->fini = no_render_fini;

//    sna->kgem.context_switch = no_render_context_switch;
//    sna->kgem.retire = no_render_retire;

    if (sna->kgem.gen >= 60)
        sna->kgem.ring = KGEM_RENDER;

    sna_vertex_init(sna);
}

void sna_vertex_init(struct sna *sna)
{
//    pthread_mutex_init(&sna->render.lock, NULL);
//    pthread_cond_init(&sna->render.wait, NULL);
    sna->render.active = 0;
}

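/*
 * Pick a render backend for the detected GPU generation (gen3..gen7),
 * falling back to the "no" backend, then reset kgem state and bind the
 * scanout framebuffer.
 */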
int sna_accel_init(struct sna *sna)
{
    const char *backend;

//    list_init(&sna->deferred_free);
//    list_init(&sna->dirty_pixmaps);
//    list_init(&sna->active_pixmaps);
//    list_init(&sna->inactive_clock[0]);
//    list_init(&sna->inactive_clock[1]);

//    sna_accel_install_timers(sna);

    backend = "no";
    no_render_init(sna);

    if (sna->info->gen >= 0100) {
    } else if (sna->info->gen >= 070) {
        if (gen7_render_init(sna))
            backend = "IvyBridge";
    } else if (sna->info->gen >= 060) {
        if (gen6_render_init(sna))
            backend = "SandyBridge";
    } else if (sna->info->gen >= 050) {
        if (gen5_render_init(sna))
            backend = "Ironlake";
    } else if (sna->info->gen >= 040) {
        if (gen4_render_init(sna))
            backend = "Broadwater/Crestline";
    } else if (sna->info->gen >= 030) {
        if (gen3_render_init(sna))
            backend = "gen3";
    }

    DBG(("%s(backend=%s, prefer_gpu=%x)\n",
         __FUNCTION__, backend, sna->render.prefer_gpu));

    kgem_reset(&sna->kgem);

//    if (!sna_solid_cache_init(sna))
//        return false;

    sna_device = sna;

    return kgem_init_fb(&sna->kgem, &sna_fb);
}

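/*
 * Entry point for the 2D driver: query PCI info from the display service,
 * detect the chipset, bring up kgem and the acceleration backend, and
 * return the render capability flags. A second call simply returns the
 * caps of the already-initialised device.
 */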
int sna_init(uint32_t service)
{
    ioctl_t   io;
    int caps = 0;

    static struct pci_device device;
    struct sna *sna;

    DBG(("%s\n", __FUNCTION__));

    __lock_acquire_recursive(__sna_lock);

    if(sna_device)
        goto done;

    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
        goto err1;

    sna = malloc(sizeof(*sna));
    if (sna == NULL)
        goto err1;

    memset(sna, 0, sizeof(*sna));

    sna->PciInfo = &device;

    sna->info = intel_detect_chipset(sna->PciInfo);

    kgem_init(&sna->kgem, service, sna->PciInfo, sna->info->gen);

/*
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_RELAXED_FENCING,
                  sna->kgem.has_relaxed_fencing)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED,
               "Disabling use of relaxed fencing\n");
        sna->kgem.has_relaxed_fencing = 0;
    }
    if (!xf86ReturnOptValBool(sna->Options,
                  OPTION_VMAP,
                  sna->kgem.has_vmap)) {
        xf86DrvMsg(scrn->scrnIndex,
               sna->kgem.has_vmap ? X_CONFIG : X_PROBED,
               "Disabling use of vmap\n");
        sna->kgem.has_vmap = 0;
    }
*/

    /* Disable tiling by default */
    sna->tiling = SNA_TILING_DISABLE;

    /* Default fail-safe value of 75 Hz */
//    sna->vblank_interval = 1000 * 1000 * 1000 / 75;

    sna->flags = 0;

    sna_accel_init(sna);

    tls_mask = tls_alloc();

//    printf("tls mask %x\n", tls_mask);

done:
    caps = sna_device->render.caps;

err1:
    __lock_release_recursive(__sna_lock);

    return caps;
}

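/*
 * Tear down the device: release the per-thread mask bo (if one was
 * allocated), let the render backend clean up, flush the batch buffers and
 * the kgem bo cache, and forget the global sna_device.
 */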
void sna_fini()
{
    if( sna_device )
    {
        struct kgem_bo *mask;

        __lock_acquire_recursive(__sna_lock);

        mask = tls_get(tls_mask);

        sna_device->render.fini(sna_device);
        if(mask)
            kgem_bo_destroy(&sna_device->kgem, mask);
        kgem_close_batches(&sna_device->kgem);
        kgem_cleanup_cache(&sna_device->kgem);

        sna_device = NULL;
        __lock_release_recursive(__sna_lock);
    }
}

#if 0

static bool sna_solid_cache_init(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("%s\n", __FUNCTION__));

    cache->cache_bo =
        kgem_create_linear(&sna->kgem, sizeof(cache->color));
    if (!cache->cache_bo)
        return FALSE;

    /*
     * Initialise [0] with white since it is very common and filling the
     * zeroth slot simplifies some of the checks.
     */
    cache->color[0] = 0xffffffff;
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    cache->dirty = 1;
    cache->size = 1;
    cache->last = 0;

    return TRUE;
}

void
sna_render_flush_solid(struct sna *sna)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;

    DBG(("sna_render_flush_solid(size=%d)\n", cache->size));
    assert(cache->dirty);
    assert(cache->size);

    kgem_bo_write(&sna->kgem, cache->cache_bo,
              cache->color, cache->size*sizeof(uint32_t));
    cache->dirty = 0;
    cache->last = 0;
}

static void
sna_render_finish_solid(struct sna *sna, bool force)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n",
         force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty));

    if (!force && cache->cache_bo->domain != DOMAIN_GPU)
        return;

    if (cache->dirty)
        sna_render_flush_solid(sna);

    for (i = 0; i < cache->size; i++) {
        if (cache->bo[i] == NULL)
            continue;

        kgem_bo_destroy(&sna->kgem, cache->bo[i]);
        cache->bo[i] = NULL;
    }
    kgem_bo_destroy(&sna->kgem, cache->cache_bo);

    DBG(("sna_render_finish_solid reset\n"));

    cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color));
    cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t));
    cache->bo[0]->pitch = 4;
    if (force)
        cache->size = 1;
}


struct kgem_bo *
sna_render_get_solid(struct sna *sna, uint32_t color)
{
    struct sna_solid_cache *cache = &sna->render.solid_cache;
    int i;

    DBG(("%s: %08x\n", __FUNCTION__, color));

//    if ((color & 0xffffff) == 0) /* alpha only */
//        return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]);

    if (color == 0xffffffff) {
        DBG(("%s(white)\n", __FUNCTION__));
        return kgem_bo_reference(cache->bo[0]);
    }

    if (cache->color[cache->last] == color) {
        DBG(("sna_render_get_solid(%d) = %x (last)\n",
             cache->last, color));
        return kgem_bo_reference(cache->bo[cache->last]);
    }

    for (i = 1; i < cache->size; i++) {
        if (cache->color[i] == color) {
            if (cache->bo[i] == NULL) {
                DBG(("sna_render_get_solid(%d) = %x (recreate)\n",
                     i, color));
                goto create;
            } else {
                DBG(("sna_render_get_solid(%d) = %x (old)\n",
                     i, color));
                goto done;
            }
        }
    }

    sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color));

    i = cache->size++;
    cache->color[i] = color;
    cache->dirty = 1;
    DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color));

create:
    cache->bo[i] = kgem_create_proxy(cache->cache_bo,
                     i*sizeof(uint32_t), sizeof(uint32_t));
    cache->bo[i]->pitch = 4;

done:
    cache->last = i;
    return kgem_bo_reference(cache->bo[i]);
}

#endif

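/*
 * Copy a w x h rectangle from a bitmap's bo to the framebuffer with the
 * backend's copy op. The destination is offset by the window position read
 * from the thread's process info, so dst_x/dst_y are window-relative.
 * Note that src_bitmap->handle is treated here as a struct kgem_bo *,
 * unlike the surface_t-based bitmaps created by sna_create_bitmap() below.
 */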
int sna_blit_copy(bitmap_t *src_bitmap, int dst_x, int dst_y,
                  int w, int h, int src_x, int src_y)
{
    struct sna_copy_op copy;
    struct _Pixmap src, dst;
    struct kgem_bo *src_bo;

    char proc_info[1024];
    int winx, winy;

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));

    src.drawable.bitsPerPixel = 32;
    src.drawable.width  = src_bitmap->width;
    src.drawable.height = src_bitmap->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    memset(&copy, 0, sizeof(copy));

    src_bo = (struct kgem_bo*)src_bitmap->handle;

    if( sna_device->render.copy(sna_device, GXcopy,
                                &src, src_bo,
                                &dst, sna_fb.fb_bo, &copy) )
    {
        copy.blt(sna_device, &copy, src_x, src_y, w, h, winx+dst_x, winy+dst_y);
        copy.done(sna_device, &copy);
    }

    kgem_submit(&sna_device->kgem);

    return 0;

//    __asm__ __volatile__("int3");
}

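/*
 * CPU-mapped, GEM-backed pixel surface. bitmap_t::handle stores a pointer
 * to one of these (see the to_surface() macro at the top of the file).
 */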
typedef struct
{
    uint32_t        width;
    uint32_t        height;
    void           *data;
    uint32_t        pitch;
    struct kgem_bo *bo;
    uint32_t        bo_size;
    uint32_t        flags;
} surface_t;

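/*
 * Allocate a 32bpp linear bo for the bitmap, map it for CPU access and
 * publish the resulting surface_t through bitmap->handle. Returns 0 on
 * success, -1 on failure.
 */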
int sna_create_bitmap(bitmap_t *bitmap)
{
    surface_t *sf;
    struct kgem_bo *bo;

    sf = malloc(sizeof(*sf));
    if(sf == NULL)
        goto err_1;

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, bitmap->width, bitmap->height,
                        32, I915_TILING_NONE, CREATE_CPU_MAP);

    if(bo == NULL)
        goto err_2;

    void *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_3;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = map;
    sf->pitch   = bo->pitch;
    sf->bo      = bo;
    sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    sf->flags   = bitmap->flags;

    bitmap->handle = (uint32_t)sf;
    __lock_release_recursive(__sna_lock);

    return 0;

err_3:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_2:
    __lock_release_recursive(__sna_lock);
    free(sf);
err_1:
    return -1;
}

int sna_destroy_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

    __lock_acquire_recursive(__sna_lock);

    kgem_bo_destroy(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    free(sf);

    bitmap->handle = -1;
    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}

int sna_lock_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);

//    printf("%s\n", __FUNCTION__);
    __lock_acquire_recursive(__sna_lock);

    kgem_bo_sync__cpu(&sna_device->kgem, sf->bo);

    __lock_release_recursive(__sna_lock);

    bitmap->data  = sf->data;
    bitmap->pitch = sf->pitch;

    return 0;
}
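
/*
 * Illustrative call sequence only: it just strings together the entry
 * points defined in this file. The real consumer is the pixlib2 layer,
 * which may drive them differently, and the sizes and coordinates below
 * are placeholders.
 *
 *     bitmap_t bm;
 *     bm.width  = 512;
 *     bm.height = 256;
 *     bm.flags  = 0;
 *
 *     sna_init(service);                  // once, with the display service handle
 *     sna_create_bitmap(&bm);             // allocate and CPU-map a 32bpp bo
 *     sna_lock_bitmap(&bm);               // sync for CPU; fills bm.data / bm.pitch
 *     // ... draw into bm.data, bm.pitch bytes per row ...
 *     sna_blit_tex(&bm, false, dst_x, dst_y, 512, 256, 0, 0);  // composite into the window
 *     sna_destroy_bitmap(&bm);            // release the bo and surface
 *     sna_fini();
 */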

/*
 * Resize a bitmap in place: if the existing bo is already large enough for
 * the new width/height it is simply re-described with the new pitch,
 * otherwise the old bo is destroyed and a new CPU-mapped one is allocated.
 */
int sna_resize_bitmap(bitmap_t *bitmap)
{
    surface_t *sf = to_surface(bitmap);
    struct kgem *kgem = &sna_device->kgem;
    struct kgem_bo *bo = sf->bo;

    uint32_t   size;
    uint32_t   pitch;

    bitmap->pitch = -1;
    bitmap->data = (void *) -1;

    size = kgem_surface_size(kgem, kgem->has_relaxed_fencing, CREATE_CPU_MAP,
                             bitmap->width, bitmap->height, 32, I915_TILING_NONE, &pitch);
    assert(size && size <= kgem->max_object_size);

    if(sf->bo_size >= size)
    {
        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->pitch   = pitch;
        bo->pitch   = pitch;

        return 0;
    }
    else
    {
        __lock_acquire_recursive(__sna_lock);

        sna_bo_destroy(kgem, bo);

        sf->bo = NULL;

        bo = kgem_create_2d(kgem, bitmap->width, bitmap->height,
                            32, I915_TILING_NONE, CREATE_CPU_MAP);

        if(bo == NULL)
        {
            __lock_release_recursive(__sna_lock);
            return -1;
        }

        void *map = kgem_bo_map(kgem, bo);
        if(map == NULL)
        {
            sna_bo_destroy(kgem, bo);
            __lock_release_recursive(__sna_lock);
            return -1;
        }

        __lock_release_recursive(__sna_lock);

        sf->width   = bitmap->width;
        sf->height  = bitmap->height;
        sf->data    = map;
        sf->pitch   = bo->pitch;
        sf->bo      = bo;
        sf->bo_size = PAGE_SIZE * bo->size.pages.count;
    }

    return 0;
}

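/*
 * Allocate and zero an 8bpp, screen-sized mask bo for the current thread
 * and stash it in the thread's TLS slot. sna_blit_tex() uses it as the
 * per-window clip mask, with its contents filled in by SRV_MASK_UPDATE.
 */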
int sna_create_mask()
{
    struct kgem_bo *bo;

//    printf("%s width %d height %d\n", __FUNCTION__, sna_fb.width, sna_fb.height);

    __lock_acquire_recursive(__sna_lock);

    bo = kgem_create_2d(&sna_device->kgem, sna_fb.width, sna_fb.height,
                        8, I915_TILING_NONE, CREATE_CPU_MAP);

    if(unlikely(bo == NULL))
        goto err_1;

    int *map = kgem_bo_map(&sna_device->kgem, bo);
    if(map == NULL)
        goto err_2;

    __lock_release_recursive(__sna_lock);

    memset(map, 0, bo->pitch * sna_fb.height);

    tls_set(tls_mask, bo);

    return 0;

err_2:
    kgem_bo_destroy(&sna_device->kgem, bo);
err_1:
    __lock_release_recursive(__sna_lock);
    return -1;
}


bool
gen6_composite(struct sna *sna,
               uint8_t op,
               PixmapPtr src, struct kgem_bo *src_bo,
               PixmapPtr mask, struct kgem_bo *mask_bo,
               PixmapPtr dst, struct kgem_bo *dst_bo,
               int32_t src_x, int32_t src_y,
               int32_t msk_x, int32_t msk_y,
               int32_t dst_x, int32_t dst_y,
               int32_t width, int32_t height,
               struct sna_composite_op *tmp);


#define MAP(ptr) ((void*)((uintptr_t)(ptr) & ~3))

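/*
 * Composite a bitmap onto the framebuffer through the per-thread 8bpp clip
 * mask: refresh the framebuffer description, ask the kernel to update the
 * mask contents (SRV_MASK_UPDATE), then run the backend's blit_tex op with
 * source, mask and framebuffer, offset by the window position.
 */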
int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                 int w, int h, int src_x, int src_y)
{
    surface_t *sf = to_surface(bitmap);

    struct drm_i915_mask_update update;

    struct sna_composite_op composite;
    struct _Pixmap src, dst, mask;
    struct kgem_bo *src_bo, *mask_bo;
    int winx, winy;

    char proc_info[1024];

    get_proc_info(proc_info);

    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);
//    winw = *(uint32_t*)(proc_info+42)+1;
//    winh = *(uint32_t*)(proc_info+46)+1;

    mask_bo = tls_get(tls_mask);

    if(unlikely(mask_bo == NULL))
    {
        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    if(kgem_update_fb(&sna_device->kgem, &sna_fb))
    {
        __lock_acquire_recursive(__sna_lock);
        kgem_bo_destroy(&sna_device->kgem, mask_bo);
        __lock_release_recursive(__sna_lock);

        sna_create_mask();
        mask_bo = tls_get(tls_mask);
        if( mask_bo == NULL)
            return -1;
    }

    VG_CLEAR(update);
    update.handle = mask_bo->handle;
    update.bo_map = (__u32)MAP(mask_bo->map);
    drmIoctl(sna_device->kgem.fd, SRV_MASK_UPDATE, &update);
    mask_bo->pitch = update.bo_pitch;

    memset(&src, 0, sizeof(src));
    memset(&dst, 0, sizeof(dst));
    memset(&mask, 0, sizeof(mask));

    src.drawable.bitsPerPixel = 32;

    src.drawable.width  = sf->width;
    src.drawable.height = sf->height;

    dst.drawable.bitsPerPixel = 32;
    dst.drawable.width  = sna_fb.width;
    dst.drawable.height = sna_fb.height;

    mask.drawable.bitsPerPixel = 8;
    mask.drawable.width  = update.width;
    mask.drawable.height = update.height;

    memset(&composite, 0, sizeof(composite));

    src_bo = sf->bo;

    __lock_acquire_recursive(__sna_lock);

    if( sna_device->render.blit_tex(sna_device, PictOpSrc, scale,
                                    &src, src_bo,
                                    &mask, mask_bo,
                                    &dst, sna_fb.fb_bo,
                                    src_x, src_y,
                                    dst_x, dst_y,
                                    winx+dst_x, winy+dst_y,
                                    w, h,
                                    &composite) )
    {
        struct sna_composite_rectangles r;

        r.src.x = src_x;
        r.src.y = src_y;
        r.mask.x = dst_x;
        r.mask.y = dst_y;
        r.dst.x = winx+dst_x;
        r.dst.y = winy+dst_y;
        r.width  = w;
        r.height = h;

        composite.blt(sna_device, &composite, &r);
        composite.done(sna_device, &composite);
    }

    kgem_submit(&sna_device->kgem);

    __lock_release_recursive(__sna_lock);

    bitmap->data   = (void*)-1;
    bitmap->pitch  = -1;

    return 0;
}

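/*
 * Per-generation device info and the PCI id -> info table consumed by
 * intel_detect_chipset(). Generation numbers are octal, matching the
 * sna->info->gen comparisons above (e.g. 060 = Sandy Bridge, 070 = Ivy
 * Bridge).
 */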
static const struct intel_device_info intel_generic_info = {
        .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
        .gen = 030,
};
static const struct intel_device_info intel_i945_info = {
        .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
        .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
        .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
        .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
        .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
        .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
        .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
        .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
        .gen = 075,
};

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }


static const struct pci_id_match intel_device_match[] = {

        INTEL_DEVICE_MATCH (PCI_CHIP_I915_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_E7221_G, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I915_GM, &intel_i915_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_G, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GM, &intel_i945_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I945_GME, &intel_i945_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_M, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_PINEVIEW_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G33_G, &intel_g33_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q33_G, &intel_g33_info ),
        /* Another marketing win: Q35 is another g33 device not a gen4 part
         * like its G35 brethren.
         */
        INTEL_DEVICE_MATCH (PCI_CHIP_Q35_G, &intel_g33_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_I965_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G35_G, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_Q, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I946_GZ, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GM, &intel_i965_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_I965_GME, &intel_i965_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_GM45_GM, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_E_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_Q45_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_G41_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G, &intel_g4x_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_B43_G1, &intel_g4x_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_D_G, &intel_ironlake_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IRONLAKE_M_G, &intel_ironlake_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT1, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS, &intel_sandybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_SANDYBRIDGE_S_GT, &intel_sandybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_M_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_D_GT2, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT1, &intel_ivybridge_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_IVYBRIDGE_S_GT2, &intel_ivybridge_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_SDV_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_ULT_S_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_D_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_M_GT2_PLUS, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT1, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2, &intel_haswell_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_HASWELL_CRW_S_GT2_PLUS, &intel_haswell_info ),

        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_PO, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_1, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_2, &intel_valleyview_info ),
        INTEL_DEVICE_MATCH (PCI_CHIP_VALLEYVIEW_3, &intel_valleyview_info ),

        INTEL_DEVICE_MATCH (PCI_MATCH_ANY, &intel_generic_info ),

        { 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    while(list->device_id)
    {
        if(dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

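/*
 * Map the PCI device id to its intel_device_info entry; unknown ids fall
 * back to intel_generic_info (gen = -1, i.e. no accelerated backend).
 */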
const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent = NULL;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if(ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;

#if 0
        for (i = 0; intel_chipsets[i].name != NULL; i++) {
                if (DEVICE_ID(pci) == intel_chipsets[i].token) {
                        name = intel_chipsets[i].name;
                        break;
                }
        }
        if (name == NULL) {
                xf86DrvMsg(scrn->scrnIndex, X_WARNING, "unknown chipset\n");
                name = "unknown";
        } else {
                xf86DrvMsg(scrn->scrnIndex, from,
                           "Integrated Graphics Chipset: Intel(R) %s\n",
                           name);
        }

        scrn->chipset = name;
#endif
}

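/*
 * KolibriOS stand-in for libdrm's drmIoctl(): the request is forwarded to
 * the display service through call_service(). inp_size is hardcoded to 64
 * bytes rather than derived from the request.
 */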
int drmIoctl(int fd, unsigned long request, void *arg)
{
    ioctl_t  io;

    io.handle   = fd;
    io.io_code  = request;
    io.input    = arg;
    io.inp_size = 64;
    io.output   = NULL;
    io.out_size = 0;

    return call_service(&io);
}