/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_module.h>
#include <linux/dma_remapping.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

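/*
 * The ioctl plumbing below is carried over from the upstream Linux driver
 * but compiled out (#if 0) in this KolibriOS port: userspace ioctl dispatch
 * is not wired up yet, so the table is kept only for reference.
 */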
#if 0
/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                 union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP                                \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
                 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
                 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
                 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
                 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK                          \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
                 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                 struct drm_vmw_update_layout_arg)
#define DRM_IOCTL_VMW_CREATE_SHADER                             \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SHADER,      \
                 struct drm_vmw_shader_create_arg)
#define DRM_IOCTL_VMW_UNREF_SHADER                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SHADER,        \
                 struct drm_vmw_shader_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_CREATE                         \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_CREATE,  \
                 union drm_vmw_gb_surface_create_arg)
#define DRM_IOCTL_VMW_GB_SURFACE_REF                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GB_SURFACE_REF,     \
                 union drm_vmw_gb_surface_reference_arg)
#define DRM_IOCTL_VMW_SYNCCPU                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU,             \
                 struct drm_vmw_synccpu_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}

/**
 * Ioctl definitions.
 */

static const struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT, vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),

        /* These allow direct access to the framebuffers, so mark them master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SHADER,
                      vmw_shader_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_UNREF_SHADER,
                      vmw_shader_destroy_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_CREATE,
                      vmw_gb_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_GB_SURFACE_REF,
                      vmw_gb_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW),
        VMW_IOCTL_DEF(VMW_SYNCCPU,
                      vmw_user_dmabuf_synccpu_ioctl,
                      DRM_UNLOCKED | DRM_RENDER_ALLOW),
};
#endif

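/*
 * PCI match table: vendor 0x15ad is VMware, and device 0x0405 is the
 * SVGA II display adapter exposed to VMware virtual machines.
 */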
static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};

static int enable_fbdev = 1;
static int vmw_force_iommu;
static int vmw_restrict_iommu;
static int vmw_force_coherent;
static int vmw_restrict_dma_mask;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);

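/*
 * Module parameters inherited from the Linux driver. KolibriOS has no
 * module-parameter mechanism, so in this port the compile-time defaults
 * above are presumably what takes effect.
 */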
MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
MODULE_PARM_DESC(force_dma_api, "Force using the DMA API for TTM pages");
module_param_named(force_dma_api, vmw_force_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);


static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
        if (capabilities & SVGA_CAP_COMMAND_BUFFERS)
                DRM_INFO("  Command Buffers.\n");
        if (capabilities & SVGA_CAP_CMD_BUFFERS_2)
                DRM_INFO("  Command Buffers 2.\n");
        if (capabilities & SVGA_CAP_GBOBJECTS)
                DRM_INFO("  Guest Backed Resources.\n");
}

/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure. Finally, it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        int ret;
        struct ttm_buffer_object *bo;
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;

        /*
         * Create the bo as pinned, so that a tryreserve will
         * immediately succeed. This is because we're the only
         * user of the bo currently.
         */
        ret = ttm_bo_create(&dev_priv->bdev,
                            PAGE_SIZE,
                            ttm_bo_type_device,
                            &vmw_sys_ne_placement,
                            0, false, NULL,
                            &bo);

        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, false, true, false, NULL);
        BUG_ON(ret != 0);

        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        }
        vmw_bo_pin(bo, false);
        ttm_bo_unreserve(bo);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Dummy query buffer map failed.\n");
                ttm_bo_unref(&bo);
        } else
                dev_priv->dummy_query_bo = bo;

        return ret;
}

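/*
 * Note: in this port, fence manager bring-up and the dummy query bo are
 * still commented out below, so vmw_request_device() currently only
 * initializes the FIFO.
 */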
static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
//   vmw_fence_fifo_up(dev_priv->fman);
//   ret = vmw_dummy_query_bo_create(dev_priv);
//   if (unlikely(ret != 0))
//       goto out_no_query_bo;
//   vmw_dummy_query_bo_prepare(dev_priv);

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}


/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}
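/*
 * Usage sketch: callers bracket 3D use with a matched pair, e.g.
 * vmw_driver_load() takes a reference for fbdev with
 * vmw_3d_resource_inc(dev_priv, true) and the (currently compiled-out)
 * unload path drops it with vmw_3d_resource_dec(dev_priv, false).
 */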

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and
 * clamping the values up to VMW_MIN_INITIAL_[WIDTH|HEIGHT]. If the
 * result exceeds fb_max_[width|height], the values are assumed invalid
 * and reset to VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}

/**
 * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
 * system.
 *
 * @dev_priv: Pointer to a struct vmw_private
 *
 * This function tries to determine the IOMMU setup and what actions
 * need to be taken by the driver to make system pages visible to the
 * device.
 * If this function decides that DMA is not possible, it returns -EINVAL.
 * The driver may then try to disable features of the device that require
 * DMA.
 */
static int vmw_dma_select_mode(struct vmw_private *dev_priv)
{
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
                [vmw_dma_map_populate] = "Keeping DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};

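        /*
         * This KolibriOS port always selects physical TTM page addresses;
         * the coherent and IOMMU-backed modes from the Linux driver are
         * not hooked up here.
         */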
        dev_priv->map_mode = vmw_dma_phys;
        DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);

        return 0;
}

/**
 * vmw_dma_masks - set required page and dma masks
 *
 * @dev: Pointer to struct drm_device
 *
 * With 32-bit we can only handle 32 bit PFNs. Optionally set that
 * restriction also for 64-bit systems.
 */
#ifdef CONFIG_INTEL_IOMMU
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;

        if (intel_iommu_enabled &&
            (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
                DRM_INFO("Restricting DMA addresses to 44 bits.\n");
                return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
        }
        return 0;
}
#else
static int vmw_dma_masks(struct vmw_private *dev_priv)
{
        return 0;
}
#endif

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
        enum vmw_res_type i;
        bool refuse_dma = false;

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        pci_set_master(dev->pdev);

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        mutex_init(&dev_priv->binding_mutex);
        rwlock_init(&dev_priv->resource_lock);
        ttm_lock_init(&dev_priv->reservation_sem);

        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
        }

        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);

        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
        ret = vmw_dma_select_mode(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
                refuse_dma = true;
        }

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }
        dev_priv->max_mob_pages = 0;
        dev_priv->max_mob_size = 0;
        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                uint64_t mem_size =
                        vmw_read(dev_priv,
                                 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

                dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
                dev_priv->prim_bb_mem =
                        vmw_read(dev_priv,
                                 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
                dev_priv->max_mob_size =
                        vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
        } else
                dev_priv->prim_bb_mem = dev_priv->vram_size;

        ret = vmw_dma_masks(dev_priv);
        if (unlikely(ret != 0)) {
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size))
                dev_priv->prim_bb_mem = dev_priv->vram_size;

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("Maximum display memory size is %u kiB\n",
                 dev_priv->prim_bb_mem / 1024);
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        vmw_master_init(&dev_priv->fbdev_master);
        dev_priv->active_master = &dev_priv->fbdev_master;

        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver,
                                 NULL,
                                 VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
            refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                                         VMW_PL_GMR) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
                dev_priv->has_mob = true;
                if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
                                   VMW_PL_MOB) != 0) {
                        DRM_INFO("No MOB memory available. "
                                 "3D will be disabled.\n");
                        dev_priv->has_mob = false;
                }
        }
        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
                (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

#if 0
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev, dev->pdev->irq);
                if (ret != 0) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL)) {
                ret = -ENOMEM;
                goto out_no_fman;
        }

        vmw_kms_save_vga(dev_priv);
#endif

        /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;

        if (dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        goto out_no_fifo;
//              vmw_fb_init(dev_priv);
        }

        main_device = dev;

        return 0;

out_no_fifo:
//   vmw_overlay_close(dev_priv);
//   vmw_kms_close(dev_priv);
out_no_kms:
//   vmw_kms_restore_vga(dev_priv);
//   vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
//   if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
//       drm_irq_uninstall(dev_priv->dev);
out_no_irq:
//   if (dev_priv->stealth)
//       pci_release_region(dev->pdev, 2);
//   else
//       pci_release_regions(dev->pdev);
out_no_device:
//   ttm_object_device_release(&dev_priv->tdev);
out_err4:
//   iounmap(dev_priv->mmio_virt);
out_err3:
//   arch_phys_wc_del(dev_priv->mmio_mtrr);
//   if (dev_priv->has_gmr)
//       (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
//   (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
//   (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
//   vmw_ttm_global_release(dev_priv);
out_err0:
//   for (i = vmw_res_context; i < vmw_res_max; ++i)
//       idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);
        return ret;
}

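/*
 * The unload and file-close paths below are retained from the Linux
 * driver but compiled out in this port.
 */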
#if 0
static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        enum vmw_res_type i;

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.res_ht_initialized)
                drm_ht_remove(&dev_priv->ctx.res_ht);
        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        arch_phys_wc_del(dev_priv->mmio_mtrr);
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);

        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);

        return 0;
}

static void vmw_preclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_private *dev_priv = vmw_priv(dev);

        vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
                          struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);

        if (vmw_fp->locked_master) {
                struct vmw_master *vmaster =
                        vmw_master(vmw_fp->locked_master);

                ttm_vt_unlock(&vmaster->lock);
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_object_file_release(&vmw_fp->tfile);
        kfree(vmw_fp);
}
#endif

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        INIT_LIST_HEAD(&vmw_fp->fence_events);
//   vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
//   if (unlikely(vmw_fp->tfile == NULL))
//       goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

#if 0
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                const struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}

static void vmw_lastclose(struct drm_device *dev)
{
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = drm_mode_set_config_internal(&set);
                WARN_ON(ret != 0);
        }

}
#endif

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
//      ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}
#if 0
static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
        }
        return ret;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        vmw_execbuf_release_pinned_bo(dev_priv);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}


static void vmw_remove(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);

        drm_put_dev(dev);
}

static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                              void *ptr)
{
        struct vmw_private *dev_priv =
                container_of(nb, struct vmw_private, pm_nb);

        switch (val) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
                ttm_suspend_lock(&dev_priv->reservation_sem);

                /**
                 * This empties VRAM and unbinds all GMR bindings.
                 * Buffer contents are moved to swappable memory.
                 */
                vmw_execbuf_release_pinned_bo(dev_priv);
                vmw_resource_evict_all(dev_priv);
                ttm_bo_swapout_all(&dev_priv->bdev);

                break;
        case PM_POST_HIBERNATION:
        case PM_POST_SUSPEND:
        case PM_POST_RESTORE:
                ttm_suspend_unlock(&dev_priv->reservation_sem);

                break;
        case PM_RESTORE_PREPARE:
                break;
        default:
                break;
        }
        return 0;
}

/**
 * These might not be needed with the virtual SVGA device.
 */

static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->num_3d_resources != 0) {
                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");
                return -EBUSY;
        }

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);
        return 0;
}

static int vmw_pci_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}

static int vmw_pm_suspend(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct pm_message dummy;

        dummy.event = 0;

        return vmw_pci_suspend(pdev, dummy);
}

static int vmw_pm_resume(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);

        return vmw_pci_resume(pdev);
}

static int vmw_pm_prepare(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        /**
         * Release 3d reference held by fbdev and potentially
         * stop fifo.
         */
        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_3d_resource_dec(dev_priv, true);

        if (dev_priv->num_3d_resources != 0) {

                DRM_INFO("Can't suspend or hibernate "
                         "while 3D resources are active.\n");

                if (dev_priv->enable_fb)
                        vmw_3d_resource_inc(dev_priv, true);
                dev_priv->suspended = false;
                return -EBUSY;
        }

        return 0;
}

#endif

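/*
 * DRM driver descriptor. Most callbacks are still commented out in this
 * port; only load, open and the IRQ hooks are wired up so far.
 */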
static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET | DRIVER_RENDER,
        .load = vmw_driver_load,
//      .unload = vmw_driver_unload,
//      .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
//      .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
//      .get_vblank_counter = vmw_get_vblank_counter,
//      .enable_vblank = vmw_enable_vblank,
//      .disable_vblank = vmw_disable_vblank,
//      .ioctls = vmw_ioctls,
//      .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
        .open = vmw_driver_open,
//      .preclose = vmw_preclose,
//      .postclose = vmw_postclose,

//      .dumb_create = vmw_dumb_create,
//      .dumb_map_offset = vmw_dumb_map_offset,
//      .dumb_destroy = vmw_dumb_destroy,
};

#if 0
static struct pci_driver vmw_pci_driver = {
        .name = VMWGFX_DRIVER_NAME,
        .id_table = vmw_pci_id_list,
        .probe = vmw_probe,
        .remove = vmw_remove,
        .driver = {
                .pm = &vmw_pm_ops
        }
};

static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        return drm_get_pci_dev(pdev, ent, &driver);
}
#endif

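/*
 * KolibriOS entry point: instead of registering a PCI driver, the port
 * scans the bus directly with find_pci_device() and hands the matching
 * device straight to drm_get_pci_dev().
 */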
int vmw_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id *ent;
    int err;

    ent = find_pci_device(&device, vmw_pci_id_list);
    if (unlikely(ent == NULL)) {
        dbgprintf("device not found\n");
        return -ENODEV;
    }

    drm_core_init();

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
                               device.pci_dev.device);

    err = drm_get_pci_dev(&device.pci_dev, ent, &driver);

    return err;
}


MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");

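/*
 * Local stand-in for the kernel's kmemdup(), which the port's
 * kernel-compatibility layer apparently does not provide.
 */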
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
    void *p;

    p = kmalloc(len, gfp);
    if (p)
        memcpy(p, src, len);
    return p;
}