Subversion Repositories Kolibri OS

Rev 4315

#include <assert.h>
#include <stdlib.h>
#include <memory.h>

#include <intel_bufmgr.h>
//#include "xf86.h"
#include "uxa/intel.h"
#include "i830_reg.h"
#include "i965_reg.h"

/* bring in brw structs */
#include "brw_defines.h"
#include "brw_structs.h"

#include "i915_pciids.h"
#include <pixlib2.h>
#include <kos32sys.h>

/* X Render compositing (Porter-Duff) operator codes */
#define PictOpClear             0
#define PictOpSrc               1
#define PictOpDst               2
#define PictOpOver              3
#define PictOpOverReverse       4
#define PictOpIn                5
#define PictOpInReverse         6
#define PictOpOut               7
#define PictOpOutReverse        8
#define PictOpAtop              9
#define PictOpAtopReverse       10
#define PictOpXor               11
#define PictOpAdd               12
#define PictOpSaturate          13
#define PictOpMaximum           13

static int tls_mask;                    /* TLS slot mask allocated in uxa_init() */

intel_screen_private *driverPrivate;    /* backing store for intel_get_screen_private() */
__LOCK_INIT_RECURSIVE(, __uxa_lock);    /* guards sf_list and one-time init */

#define DBG printf

/* Binds a KolibriOS bitmap to its backing GEM buffer object. */
typedef struct
{
    struct list     entry;      /* node in the global sf_list */
    uint32_t        width;
    uint32_t        height;
    void           *data;
    uint32_t        pitch;
    drm_intel_bo   *bo;
    uint32_t        bo_size;
    uint32_t        flags;
} surface_t;

/* bitmap_t::handle carries a surface_t pointer cast to uint32_t (32-bit OS) */
#define to_surface(x) ((surface_t*)((x)->handle))

struct _Pixmap fb_pixmap;       /* pixmap wrapping the scanout front buffer */

struct list sf_list;            /* surfaces created from GEM handles */

int uxa_update_fb(struct intel_screen_private *intel);

/* stub: mask surfaces are not implemented in this port */
int sna_create_mask(void)
{
    return 0;
}

static void i830_done_composite(PixmapPtr dest)
{
    intel_screen_private *intel = intel_get_screen_private();

    if (intel->vertex_flush)
        intel->vertex_flush(intel);

//      intel_debug_flush(scrn);
}

int sna_bitmap_from_handle(bitmap_t *bitmap, uint32_t handle)
{
    struct intel_screen_private *intel = intel_get_screen_private();
    drm_intel_bo *bo;
    surface_t    *sf;
    unsigned int  size;

    bitmap->handle = 0;

    /* reuse an existing surface if this GEM handle is already wrapped */
    __lock_acquire_recursive(__uxa_lock);
    list_for_each_entry(sf, &sf_list, entry)
    {
        if (sf->bo->handle == handle)
        {
            bitmap->handle = (uint32_t)sf;
            break;
        }
    }
    __lock_release_recursive(__uxa_lock);

    if (bitmap->handle)
        return 0;

    sf = malloc(sizeof(*sf));
    if (sf == NULL)
        goto err_1;

    size = bitmap->pitch * bitmap->height;

    bo = bo_create_from_gem_handle(intel->bufmgr, size, handle);
    if (bo == NULL)
        goto err_2;

    sf->width   = bitmap->width;
    sf->height  = bitmap->height;
    sf->data    = NULL;
    sf->pitch   = bitmap->pitch;
    sf->bo      = bo;
    sf->bo_size = size;
    sf->flags   = bitmap->flags;

    /* cache the surface so later lookups by handle can find it */
    __lock_acquire_recursive(__uxa_lock);
    list_add(&sf->entry, &sf_list);
    __lock_release_recursive(__uxa_lock);

    bitmap->handle = (uint32_t)sf;

    return 0;

err_2:
    free(sf);
err_1:
    return -1;
}
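
/*
 * Usage sketch (illustrative only; field values are hypothetical): a client
 * that has received a GEM handle wraps it once, then blits from it
 * repeatedly.
 *
 *     bitmap_t bm;
 *     bm.width  = 640;
 *     bm.height = 480;
 *     bm.pitch  = 640 * 4;     // 32 bpp, assuming no row padding
 *     bm.flags  = 0;
 *     if (sna_bitmap_from_handle(&bm, gem_handle) == 0)
 *         sna_blit_tex(&bm, false, 0, 0, bm.width, bm.height, 0, 0);
 */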

/* pixlib2 entry point: thin wrapper over sna_bitmap_from_handle() */
void sna_set_bo_handle(bitmap_t *bitmap, int handle)
{
    sna_bitmap_from_handle(bitmap, handle);
}

int sna_blit_tex(bitmap_t *bitmap, bool scale, int dst_x, int dst_y,
                 int w, int h, int src_x, int src_y)
{
//    DBG("%s\n", __FUNCTION__);

    struct _Pixmap pixSrc, pixMask;
    struct intel_pixmap privSrc;
    struct _Picture pictSrc, pictDst;
    struct intel_screen_private *intel = intel_get_screen_private();

    surface_t *sf = to_surface(bitmap);

    int winx, winy;

    /* destination coordinates are window-relative; fetch the window origin
       from the process info block (KolibriOS sysfn 9: +34 = x, +38 = y) */
    char proc_info[1024];
    get_proc_info(proc_info);
    winx = *(uint32_t*)(proc_info+34);
    winy = *(uint32_t*)(proc_info+38);

    memset(&pixSrc,  0, sizeof(pixSrc));
    memset(&pixMask, 0, sizeof(pixMask));
    memset(&privSrc, 0, sizeof(privSrc));

    memset(&pictSrc, 0, sizeof(pictSrc));
    memset(&pictDst, 0, sizeof(pictDst));

    /* wrap the source surface in a stack-local pixmap for the render path */
    pixSrc.drawable.bitsPerPixel = 32;
    pixSrc.drawable.width        = sf->width;
    pixSrc.drawable.height       = sf->height;
    pixSrc.devKind               = sf->pitch;
    pixSrc.private               = &privSrc;

    list_init(&privSrc.batch);
    privSrc.bo     = sf->bo;
    privSrc.stride = sf->pitch;
    privSrc.tiling = I915_TILING_X;

    pictSrc.format     = PICT_x8r8g8b8;
    pictSrc.filter     = PictFilterNearest;
    pictSrc.repeatType = RepeatNone;

    pictDst.format     = PICT_a8r8g8b8;
    pictDst.filter     = PictFilterNearest;
    pictDst.repeatType = RepeatNone;

    /* pick up any mode change before touching the front buffer */
    uxa_update_fb(intel);

    i965_prepare_composite(PictOpSrc, &pictSrc, NULL, &pictDst,
                           &pixSrc, NULL, &fb_pixmap);

    i965_composite(&fb_pixmap, src_x, src_y, 0, 0,
                   dst_x+winx, dst_y+winy, w, h);

    i830_done_composite(&fb_pixmap);

    intel_batch_submit();

//    DBG("%s done\n", __FUNCTION__);

    return 0;
}
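
/*
 * Note on the flow above: each blit re-wraps the source bo in a stack-local
 * pixmap/picture pair, refreshes the front-buffer geometry, and then drives
 * the gen4+ render copy (i965_prepare_composite / i965_composite) before
 * submitting the batch. A hypothetical per-frame redraw therefore needs no
 * setup beyond the call itself:
 *
 *     sna_blit_tex(&bm, false, 0, 0, bm.width, bm.height, 0, 0);
 */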

int uxa_init_fb(struct intel_screen_private *intel)
{
    struct drm_i915_fb_info fb;
    static struct intel_pixmap ipix;
    int ret;

    memset(&fb, 0, sizeof(fb));

    /* query the current scanout buffer from the i915 service */
    ret = drmIoctl(intel->scrn, SRV_FBINFO, &fb);
    if (ret != 0)
        return ret;

    intel->front_buffer = intel_bo_gem_create_from_name(intel->bufmgr,
                                                        "frontbuffer", fb.name);
    if (intel->front_buffer == NULL)
        return -1;

    ipix.bo = intel->front_buffer;
    list_init(&ipix.batch);
    ipix.stride = fb.pitch;
    ipix.tiling = fb.tiling;
    ipix.pinned = PIN_SCANOUT;

    printf("create frontbuffer name %d bo %p\n", fb.name, ipix.bo);
    printf("size %lu, offset %lu handle %d\n",
           (unsigned long)ipix.bo->size, (unsigned long)ipix.bo->offset,
           ipix.bo->handle);

    fb_pixmap.drawable.bitsPerPixel = 32;
    fb_pixmap.drawable.width  = fb.width;
    fb_pixmap.drawable.height = fb.height;
    fb_pixmap.devKind = fb.pitch;
    fb_pixmap.private = &ipix;

    return 0;
}

int uxa_update_fb(struct intel_screen_private *intel)
{
    struct drm_i915_fb_info fb;
    struct intel_pixmap *ipix;
    int ret;

//    DBG("%s\n", __FUNCTION__);

    ret = drmIoctl(intel->scrn, SRV_FBINFO, &fb);
    if (ret != 0)
        return ret;

    ipix = (struct intel_pixmap*)fb_pixmap.private;

    list_init(&ipix->batch);
    ipix->stride = fb.pitch;
    ipix->tiling = fb.tiling;

    fb_pixmap.drawable.width  = fb.width;
    fb_pixmap.drawable.height = fb.height;
    fb_pixmap.devKind = fb.pitch;

    return 0;
}
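
/*
 * uxa_update_fb() is invoked before every composite (see sna_blit_tex)
 * rather than once at startup: SRV_FBINFO re-reads the current width,
 * height, pitch and tiling, so blits keep working after a mode change.
 * Only uxa_init_fb() creates the front-buffer bo itself; a mode switch
 * that replaced the scanout object would presumably require re-running it.
 */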

int uxa_init(uint32_t service)
{
    static struct pci_device device;
    struct intel_screen_private *intel = intel_get_screen_private();

    ioctl_t io;
    int caps = 0;

    DBG("%s\n", __FUNCTION__);

    __lock_acquire_recursive(__uxa_lock);

    if (intel)
        goto done;

    /* query the GPU's PCI identity from the i915 service */
    io.handle   = service;
    io.io_code  = SRV_GET_PCI_INFO;
    io.input    = &device;
    io.inp_size = sizeof(device);
    io.output   = NULL;
    io.out_size = 0;

    if (call_service(&io) != 0)
        goto err1;

    intel = (intel_screen_private*)malloc(sizeof(*intel));
    if (intel == NULL)
        goto err1;

    list_init(&sf_list);

    driverPrivate = intel;
    memset(intel, 0, sizeof(*intel));

//    sna->cpu_features = sna_cpu_detect();

    intel->PciInfo = &device;
    intel->info    = intel_detect_chipset(intel->PciInfo);
    intel->scrn    = service;

    intel->bufmgr = intel_bufmgr_gem_init(service, 8192);
    if (intel->bufmgr == NULL)
    {
        printf("Memory manager initialization failed\n");
        goto err1;
    }

    list_init(&intel->batch_pixmaps);

    /* Sandy Bridge needs a scratch bo for a render workaround */
    if (INTEL_INFO(intel)->gen == 060) {
        intel->wa_scratch_bo =
            drm_intel_bo_alloc(intel->bufmgr, "wa scratch",
                               4096, 4096);
    }

    if (uxa_init_fb(intel) != 0)
        goto err1;

    intel_batch_init();

    if (INTEL_INFO(intel)->gen >= 040)
        gen4_render_state_init();

    if (!intel_uxa_init()) {
        printf("Hardware acceleration initialization failed\n");
        goto err1;
    }

    tls_mask = tls_alloc();

//    printf("tls mask %x\n", tls_mask);

done:
//    caps = sna_device->render.caps;

err1:
    __lock_release_recursive(__uxa_lock);

    LEAVE();
    return caps;
}
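
/*
 * Initialization sketch (hypothetical caller): pixlib is expected to call
 * uxa_init() exactly once with the handle of the loaded display service;
 * later calls take the `done:` shortcut under the recursive lock.
 *
 *     uint32_t srv = get_service("DISPLAY");   // hypothetical service name
 *     if (srv != 0)
 *         uxa_init(srv);
 */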

static void
gen6_context_switch(intel_screen_private *intel,
                    int new_mode)
{
    intel_batch_submit(intel->scrn);
}

static void
gen5_context_switch(intel_screen_private *intel,
                    int new_mode)
{
    /* Ironlake has a limitation that a 3D or Media command can't
     * be the first command after a BLT, unless it's
     * non-pipelined.  Instead of trying to track it and emit a
     * command at the right time, we just emit a dummy
     * non-pipelined 3D instruction after each blit.
     */

    if (new_mode == I915_EXEC_BLT) {
        OUT_BATCH(MI_FLUSH |
                  MI_STATE_INSTRUCTION_CACHE_FLUSH |
                  MI_INHIBIT_RENDER_CACHE_FLUSH);
    } else {
        OUT_BATCH(CMD_POLY_STIPPLE_OFFSET << 16);
        OUT_BATCH(0);
    }
}

static void
gen4_context_switch(intel_screen_private *intel,
                    int new_mode)
{
    if (new_mode == I915_EXEC_BLT) {
        OUT_BATCH(MI_FLUSH |
                  MI_STATE_INSTRUCTION_CACHE_FLUSH |
                  MI_INHIBIT_RENDER_CACHE_FLUSH);
    }
}

static void
intel_limits_init(intel_screen_private *intel)
{
    /* Limits are described in the BLT engine chapter under Graphics Data Size
     * Limitations, and the descriptions of SURFACE_STATE, 3DSTATE_BUFFER_INFO,
     * 3DSTATE_DRAWING_RECTANGLE, and 3DSTATE_MAP_INFO.
     *
     * i845 through i965 limits 2D rendering to 65536 lines and pitch of 32768.
     *
     * i965 limits 3D surface to (2*element size)-aligned offset if un-tiled.
     * i965 limits 3D surface to 4kB-aligned offset if tiled.
     * i965 limits 3D surfaces to w,h of ?,8192.
     * i965 limits 3D surface to pitch of 1B - 128kB.
     * i965 limits 3D surface pitch alignment to 1 or 2 times the element size.
     * i965 limits 3D surface pitch alignment to 512B if tiled.
     * i965 limits 3D destination drawing rect to w,h of 8192,8192.
     *
     * i915 limits 3D textures to 4B-aligned offset if un-tiled.
     * i915 limits 3D textures to ~4kB-aligned offset if tiled.
     * i915 limits 3D textures to width,height of 2048,2048.
     * i915 limits 3D textures to pitch of 16B - 8kB, in dwords.
     * i915 limits 3D destination to ~4kB-aligned offset if tiled.
     * i915 limits 3D destination to pitch of 16B - 8kB, in dwords, if un-tiled.
     * i915 limits 3D destination to pitch 64B-aligned if used with depth.
     * i915 limits 3D destination to pitch of 512B - 8kB, in tiles, if tiled.
     * i915 limits 3D destination to POT aligned pitch if tiled.
     * i915 limits 3D destination drawing rect to w,h of 2048,2048.
     *
     * i845 limits 3D textures to 4B-aligned offset if un-tiled.
     * i845 limits 3D textures to ~4kB-aligned offset if tiled.
     * i845 limits 3D textures to width,height of 2048,2048.
     * i845 limits 3D textures to pitch of 4B - 8kB, in dwords.
     * i845 limits 3D destination to 4B-aligned offset if un-tiled.
     * i845 limits 3D destination to ~4kB-aligned offset if tiled.
     * i845 limits 3D destination to pitch of 8B - 8kB, in dwords.
     * i845 limits 3D destination drawing rect to w,h of 2048,2048.
     *
     * For the tiled issues, the only tiled buffer we draw to should be
     * the front, which will have an appropriate pitch/offset already set up,
     * so UXA doesn't need to worry.
     */
    if (INTEL_INFO(intel)->gen >= 040) {
        intel->accel_pixmap_offset_alignment = 4 * 2;
        intel->accel_max_x = 8192;
        intel->accel_max_y = 8192;
    } else {
        intel->accel_pixmap_offset_alignment = 4;
        intel->accel_max_x = 2048;
        intel->accel_max_y = 2048;
    }
}
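
/*
 * Example (sketch, not part of UXA): how these limits would gate
 * acceleration; the real UXA core performs equivalent checks before
 * choosing the hardware path.
 *
 *     static Bool can_accel_pixmap(intel_screen_private *intel, PixmapPtr p)
 *     {
 *         return p->drawable.width  <= intel->accel_max_x &&
 *                p->drawable.height <= intel->accel_max_y;
 *     }
 */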

Bool intel_uxa_init(void)
{
    intel_screen_private *intel = intel_get_screen_private();

    intel_limits_init(intel);

    intel->prim_offset = 0;
    intel->vertex_count = 0;
    intel->vertex_offset = 0;
    intel->vertex_used = 0;
    intel->floats_per_vertex = 0;
    intel->last_floats_per_vertex = 0;
    intel->vertex_bo = NULL;
    intel->surface_used = 0;
    intel->surface_reloc = 0;

/*
    intel->uxa_driver->check_composite = i965_check_composite;
    intel->uxa_driver->check_composite_texture = i965_check_composite_texture;
    intel->uxa_driver->prepare_composite = i965_prepare_composite;
    intel->uxa_driver->composite = i965_composite;
    intel->uxa_driver->done_composite = i830_done_composite;
*/
    intel->vertex_flush = i965_vertex_flush;
    intel->batch_flush = i965_batch_flush;
    intel->batch_commit_notify = i965_batch_commit_notify;

    if (IS_GEN4(intel)) {
        intel->context_switch = gen4_context_switch;
    } else if (IS_GEN5(intel)) {
        intel->context_switch = gen5_context_switch;
    } else {
        intel->context_switch = gen6_context_switch;
    }

    return TRUE;
}

static const struct intel_device_info intel_generic_info = {
    .gen = -1,
};

static const struct intel_device_info intel_i915_info = {
    .gen = 030,
};

static const struct intel_device_info intel_i945_info = {
    .gen = 031,
};

static const struct intel_device_info intel_g33_info = {
    .gen = 033,
};

static const struct intel_device_info intel_i965_info = {
    .gen = 040,
};

static const struct intel_device_info intel_g4x_info = {
    .gen = 045,
};

static const struct intel_device_info intel_ironlake_info = {
    .gen = 050,
};

static const struct intel_device_info intel_sandybridge_info = {
    .gen = 060,
};

static const struct intel_device_info intel_ivybridge_info = {
    .gen = 070,
};

static const struct intel_device_info intel_valleyview_info = {
    .gen = 071,
};

static const struct intel_device_info intel_haswell_info = {
    .gen = 075,
};
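
/*
 * The .gen values above are octal: the first digit is the major generation,
 * the second the minor step, e.g. 030 = Gen3 (i915), 045 = Gen4.5 (G4x),
 * 060 = Gen6 (Sandy Bridge), 075 = Gen7.5 (Haswell). This is why the checks
 * elsewhere in this file compare against octal literals, such as
 * INTEL_INFO(intel)->gen >= 040 meaning "Gen4 or newer".
 */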

#define INTEL_DEVICE_MATCH(d,i) \
    { 0x8086, (d), PCI_MATCH_ANY, PCI_MATCH_ANY, 0x3 << 16, 0xff << 16, (intptr_t)(i) }

static const struct pci_id_match intel_device_match[] = {

    INTEL_I915G_IDS(&intel_i915_info),
    INTEL_I915GM_IDS(&intel_i915_info),
    INTEL_I945G_IDS(&intel_i945_info),
    INTEL_I945GM_IDS(&intel_i945_info),

    INTEL_G33_IDS(&intel_g33_info),
    INTEL_PINEVIEW_IDS(&intel_g33_info),

    INTEL_I965G_IDS(&intel_i965_info),
    INTEL_I965GM_IDS(&intel_i965_info),

    INTEL_G45_IDS(&intel_g4x_info),
    INTEL_GM45_IDS(&intel_g4x_info),

    INTEL_IRONLAKE_D_IDS(&intel_ironlake_info),
    INTEL_IRONLAKE_M_IDS(&intel_ironlake_info),

    INTEL_SNB_D_IDS(&intel_sandybridge_info),
    INTEL_SNB_M_IDS(&intel_sandybridge_info),

    INTEL_IVB_D_IDS(&intel_ivybridge_info),
    INTEL_IVB_M_IDS(&intel_ivybridge_info),

    INTEL_HSW_D_IDS(&intel_haswell_info),
    INTEL_HSW_M_IDS(&intel_haswell_info),

    INTEL_VLV_D_IDS(&intel_valleyview_info),
    INTEL_VLV_M_IDS(&intel_valleyview_info),

    INTEL_VGA_DEVICE(PCI_MATCH_ANY, &intel_generic_info),

    { 0, 0, 0 },
};

const struct pci_id_match *PciDevMatch(uint16_t dev, const struct pci_id_match *list)
{
    /* linear scan; intel_device_match ends with a zeroed sentinel entry */
    while (list->device_id)
    {
        if (dev == list->device_id)
            return list;
        list++;
    }
    return NULL;
}

const struct intel_device_info *
intel_detect_chipset(struct pci_device *pci)
{
    const struct pci_id_match *ent;

    ent = PciDevMatch(pci->device_id, intel_device_match);

    if (ent != NULL)
        return (const struct intel_device_info*)ent->match_data;
    else
        return &intel_generic_info;
}
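
/*
 * Lookup sketch (illustrative): intel_detect_chipset() cannot fail; any
 * PCI ID missing from intel_device_match falls back to intel_generic_info
 * (gen = -1), which the gen checks in uxa_init() effectively treat as
 * "no render acceleration".
 *
 *     struct pci_device dev;
 *     dev.device_id = 0x0166;   // hypothetical: an Ivy Bridge mobile GT2 ID
 *     const struct intel_device_info *info = intel_detect_chipset(&dev);
 *     // info->gen == 070 for the Ivy Bridge IDs listed above
 */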