
  1. /**************************************************************************
  2.  *
  3.  * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4.  * All Rights Reserved.
  5.  *
  6.  * Permission is hereby granted, free of charge, to any person obtaining a
  7.  * copy of this software and associated documentation files (the
  8.  * "Software"), to deal in the Software without restriction, including
  9.  * without limitation the rights to use, copy, modify, merge, publish,
  10.  * distribute, sub license, and/or sell copies of the Software, and to
  11.  * permit persons to whom the Software is furnished to do so, subject to
  12.  * the following conditions:
  13.  *
  14.  * The above copyright notice and this permission notice (including the
  15.  * next paragraph) shall be included in all copies or substantial portions
  16.  * of the Software.
  17.  *
  18.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19.  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20.  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21.  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22.  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23.  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24.  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25.  *
  26.  **************************************************************************/
  27. #include <linux/module.h>
  28.  
  29. #include <drm/drmP.h>
  30. #include "vmwgfx_drv.h"
  31. #include <drm/ttm/ttm_placement.h>
  32. #include <drm/ttm/ttm_bo_driver.h>
  33. #include <drm/ttm/ttm_object.h>
  34. //#include <drm/ttm/ttm_module.h>
  35.  
  36. #define VMWGFX_DRIVER_NAME "vmwgfx"
  37. #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
  38. #define VMWGFX_CHIP_SVGAII 0
  39. #define VMW_FB_RESERVATION 0
  40.  
  41. #define VMW_MIN_INITIAL_WIDTH 800
  42. #define VMW_MIN_INITIAL_HEIGHT 600
  43.  
  44. #if 0
  45. /**
  46.  * Fully encoded drm commands. Might move to vmw_drm.h
  47.  */
  48.  
  49. #define DRM_IOCTL_VMW_GET_PARAM                                 \
  50.         DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
  51.                  struct drm_vmw_getparam_arg)
  52. #define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
  53.         DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
  54.                 union drm_vmw_alloc_dmabuf_arg)
  55. #define DRM_IOCTL_VMW_UNREF_DMABUF                              \
  56.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
  57.                 struct drm_vmw_unref_dmabuf_arg)
  58. #define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
  59.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
  60.                  struct drm_vmw_cursor_bypass_arg)
  61.  
  62. #define DRM_IOCTL_VMW_CONTROL_STREAM                            \
  63.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
  64.                  struct drm_vmw_control_stream_arg)
  65. #define DRM_IOCTL_VMW_CLAIM_STREAM                              \
  66.         DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
  67.                  struct drm_vmw_stream_arg)
  68. #define DRM_IOCTL_VMW_UNREF_STREAM                              \
  69.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
  70.                  struct drm_vmw_stream_arg)
  71.  
  72. #define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
  73.         DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
  74.                 struct drm_vmw_context_arg)
  75. #define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
  76.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
  77.                 struct drm_vmw_context_arg)
  78. #define DRM_IOCTL_VMW_CREATE_SURFACE                            \
  79.         DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
  80.                  union drm_vmw_surface_create_arg)
  81. #define DRM_IOCTL_VMW_UNREF_SURFACE                             \
  82.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
  83.                  struct drm_vmw_surface_arg)
  84. #define DRM_IOCTL_VMW_REF_SURFACE                               \
  85.         DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
  86.                  union drm_vmw_surface_reference_arg)
  87. #define DRM_IOCTL_VMW_EXECBUF                                   \
  88.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
  89.                 struct drm_vmw_execbuf_arg)
  90. #define DRM_IOCTL_VMW_GET_3D_CAP                                \
  91.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
  92.                  struct drm_vmw_get_3d_cap_arg)
  93. #define DRM_IOCTL_VMW_FENCE_WAIT                                \
  94.         DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
  95.                  struct drm_vmw_fence_wait_arg)
  96. #define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
  97.         DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
  98.                  struct drm_vmw_fence_signaled_arg)
  99. #define DRM_IOCTL_VMW_FENCE_UNREF                               \
  100.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
  101.                  struct drm_vmw_fence_arg)
  102. #define DRM_IOCTL_VMW_FENCE_EVENT                               \
  103.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
  104.                  struct drm_vmw_fence_event_arg)
  105. #define DRM_IOCTL_VMW_PRESENT                                   \
  106.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
  107.                  struct drm_vmw_present_arg)
  108. #define DRM_IOCTL_VMW_PRESENT_READBACK                          \
  109.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
  110.                  struct drm_vmw_present_readback_arg)
  111. #define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
  112.         DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
  113.                  struct drm_vmw_update_layout_arg)
  114.  
  115. /**
  116.  * The core DRM version of this macro doesn't account for
  117.  * DRM_COMMAND_BASE.
  118.  */
  119.  
  120. #define VMW_IOCTL_DEF(ioctl, func, flags) \
  121.   [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
  122.  
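/*
 * For illustration (sketch): the VMW_GET_PARAM entry in the table below
 * expands to
 *
 *     [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *             {DRM_VMW_GET_PARAM, DRM_AUTH | DRM_UNLOCKED,
 *              vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM},
 *
 * so the array is indexed by the ioctl number relative to DRM_COMMAND_BASE.
 */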
  123. /**
  124.  * Ioctl definitions.
  125.  */
  126.  
  127. static const struct drm_ioctl_desc vmw_ioctls[] = {
  128.         VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
  129.                       DRM_AUTH | DRM_UNLOCKED),
  130.         VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
  131.                       DRM_AUTH | DRM_UNLOCKED),
  132.         VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
  133.                       DRM_AUTH | DRM_UNLOCKED),
  134.         VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
  135.                       vmw_kms_cursor_bypass_ioctl,
  136.                       DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
  137.  
  138.         VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
  139.                       DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
  140.         VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
  141.                       DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
  142.         VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
  143.                       DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
  144.  
  145.         VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
  146.                       DRM_AUTH | DRM_UNLOCKED),
  147.         VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
  148.                       DRM_AUTH | DRM_UNLOCKED),
  149.         VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
  150.                       DRM_AUTH | DRM_UNLOCKED),
  151.         VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
  152.                       DRM_AUTH | DRM_UNLOCKED),
  153.         VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
  154.                       DRM_AUTH | DRM_UNLOCKED),
  155.         VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
  156.                       DRM_AUTH | DRM_UNLOCKED),
  157.         VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
  158.                       DRM_AUTH | DRM_UNLOCKED),
  159.         VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
  160.                       vmw_fence_obj_signaled_ioctl,
  161.                       DRM_AUTH | DRM_UNLOCKED),
  162.         VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
  163.                       DRM_AUTH | DRM_UNLOCKED),
  164.         VMW_IOCTL_DEF(VMW_FENCE_EVENT,
  165.                       vmw_fence_event_ioctl,
  166.                       DRM_AUTH | DRM_UNLOCKED),
  167.         VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
  168.                       DRM_AUTH | DRM_UNLOCKED),
  169.  
  170.         /* These ioctls allow direct access to the framebuffers; mark them master-only. */
  171.         VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
  172.                       DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
  173.         VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
  174.                       vmw_present_readback_ioctl,
  175.                       DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
  176.         VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
  177.                       vmw_kms_update_layout_ioctl,
  178.                       DRM_MASTER | DRM_UNLOCKED),
  179. };
  180. #endif
  181.  
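/* PCI match table: VMware (vendor 0x15ad), SVGA II adapter (device 0x0405). */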
  182. static struct pci_device_id vmw_pci_id_list[] = {
  183.         {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
  184.         {0, 0, 0}
  185. };
  186.  
  187. static int enable_fbdev = 1;
  188.  
  189. static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
  190. static void vmw_master_init(struct vmw_master *);
  191.  
  192. MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
  193. module_param_named(enable_fbdev, enable_fbdev, int, 0600);
  194.  
  195. static void vmw_print_capabilities(uint32_t capabilities)
  196. {
  197.         DRM_INFO("Capabilities:\n");
  198.         if (capabilities & SVGA_CAP_RECT_COPY)
  199.                 DRM_INFO("  Rect copy.\n");
  200.         if (capabilities & SVGA_CAP_CURSOR)
  201.                 DRM_INFO("  Cursor.\n");
  202.         if (capabilities & SVGA_CAP_CURSOR_BYPASS)
  203.                 DRM_INFO("  Cursor bypass.\n");
  204.         if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
  205.                 DRM_INFO("  Cursor bypass 2.\n");
  206.         if (capabilities & SVGA_CAP_8BIT_EMULATION)
  207.                 DRM_INFO("  8bit emulation.\n");
  208.         if (capabilities & SVGA_CAP_ALPHA_CURSOR)
  209.                 DRM_INFO("  Alpha cursor.\n");
  210.         if (capabilities & SVGA_CAP_3D)
  211.                 DRM_INFO("  3D.\n");
  212.         if (capabilities & SVGA_CAP_EXTENDED_FIFO)
  213.                 DRM_INFO("  Extended Fifo.\n");
  214.         if (capabilities & SVGA_CAP_MULTIMON)
  215.                 DRM_INFO("  Multimon.\n");
  216.         if (capabilities & SVGA_CAP_PITCHLOCK)
  217.                 DRM_INFO("  Pitchlock.\n");
  218.         if (capabilities & SVGA_CAP_IRQMASK)
  219.                 DRM_INFO("  Irq mask.\n");
  220.         if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
  221.                 DRM_INFO("  Display Topology.\n");
  222.         if (capabilities & SVGA_CAP_GMR)
  223.                 DRM_INFO("  GMR.\n");
  224.         if (capabilities & SVGA_CAP_TRACES)
  225.                 DRM_INFO("  Traces.\n");
  226.         if (capabilities & SVGA_CAP_GMR2)
  227.                 DRM_INFO("  GMR2.\n");
  228.         if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
  229.                 DRM_INFO("  Screen Object 2.\n");
  230. }
  231.  
  232.  
  233. /**
  234.  * vmw_dummy_query_bo_prepare - Initialize a query result structure at
  235.  * the start of a buffer object.
  236.  *
  237.  * @dev_priv: The device private structure.
  238.  *
  239.  * This function will idle the buffer using an uninterruptible wait, then
  240.  * map the first page and initialize a pending occlusion query result structure.
  241.  * Finally, it will unmap the buffer.
  242.  *
  243.  * TODO: Since we're only mapping a single page, we should optimize the map
  244.  * to use kmap_atomic / iomap_atomic.
  245.  */
  246. static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
  247. {
  248.         struct ttm_bo_kmap_obj map;
  249.         volatile SVGA3dQueryResult *result;
  250.         bool dummy;
  251.         int ret;
  252.         struct ttm_bo_device *bdev = &dev_priv->bdev;
  253.         struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
  254.  
  255.         ttm_bo_reserve(bo, false, false, false, 0);
  256.         spin_lock(&bdev->fence_lock);
  257.     ret = 0; //ttm_bo_wait(bo, false, false, false);
  258.         spin_unlock(&bdev->fence_lock);
  259.         if (unlikely(ret != 0))
  260.                 (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
  261.                                          10*HZ);
  262. /*
  263.         ret = ttm_bo_kmap(bo, 0, 1, &map);
  264.         if (likely(ret == 0)) {
  265.                 result = ttm_kmap_obj_virtual(&map, &dummy);
  266.                 result->totalSize = sizeof(*result);
  267.                 result->state = SVGA3D_QUERYSTATE_PENDING;
  268.                 result->result32 = 0xff;
  269.                 ttm_bo_kunmap(&map);
  270.         } else
  271.                 DRM_ERROR("Dummy query buffer map failed.\n");
  272. */
  273.         ttm_bo_unreserve(bo);
  274. }
  275.  
  276.  
  277. /**
  278.  * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
  279.  *
  280.  * @dev_priv: A device private structure.
  281.  *
  282.  * This function creates a small buffer object that holds the query
  283.  * result for dummy queries emitted as query barriers.
  284.  * No interruptible waits are done within this function.
  285.  *
  286.  * Returns an error if bo creation fails.
  287.  */
  288. static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
  289. {
  290.         return ttm_bo_create(&dev_priv->bdev,
  291.                              PAGE_SIZE,
  292.                              ttm_bo_type_device,
  293.                              &vmw_vram_sys_placement,
  294.                              0, false, NULL,
  295.                              &dev_priv->dummy_query_bo);
  296. }
  297.  
  298.  
  299. static int vmw_request_device(struct vmw_private *dev_priv)
  300. {
  301.         int ret;
  302.     ENTER();
  303.  
  304.         ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
  305.         if (unlikely(ret != 0)) {
  306.                 DRM_ERROR("Unable to initialize FIFO.\n");
  307.                 return ret;
  308.         }
  309. //   vmw_fence_fifo_up(dev_priv->fman);
  310. //   ret = vmw_dummy_query_bo_create(dev_priv);
  311. //   if (unlikely(ret != 0))
  312. //       goto out_no_query_bo;
  313. //   vmw_dummy_query_bo_prepare(dev_priv);
  314.  
  315.     LEAVE();
  316.  
  317.         return 0;
  318.  
  319. out_no_query_bo:
  320.         vmw_fence_fifo_down(dev_priv->fman);
  321.         vmw_fifo_release(dev_priv, &dev_priv->fifo);
  322.         return ret;
  323. }
  324.  
  325. static void vmw_release_device(struct vmw_private *dev_priv)
  326. {
  327.         /*
  328.          * Previous destructions should've released
  329.          * the pinned bo.
  330.          */
  331.  
  332.         BUG_ON(dev_priv->pinned_bo != NULL);
  333.  
  334.         ttm_bo_unref(&dev_priv->dummy_query_bo);
  335.         vmw_fence_fifo_down(dev_priv->fman);
  336.         vmw_fifo_release(dev_priv, &dev_priv->fifo);
  337. }
  338.  
  339. /**
  340.  * Increase the 3d resource refcount.
  341.  * If the count was previously zero, initialize the fifo, switching to svga
  342.  * mode. Note that the master holds a ref as well, and may request an
  343.  * explicit switch to svga mode if fb is not running, using @unhide_svga.
  344.  */
  345. int vmw_3d_resource_inc(struct vmw_private *dev_priv,
  346.                         bool unhide_svga)
  347. {
  348.         int ret = 0;
  349.  
  350.     ENTER();
  351.  
  352.         mutex_lock(&dev_priv->release_mutex);
  353.         if (unlikely(dev_priv->num_3d_resources++ == 0)) {
  354.         ret = vmw_request_device(dev_priv);
  355.                 if (unlikely(ret != 0))
  356.                         --dev_priv->num_3d_resources;
  357.         } else if (unhide_svga) {
  358.                 mutex_lock(&dev_priv->hw_mutex);
  359.                 vmw_write(dev_priv, SVGA_REG_ENABLE,
  360.                           vmw_read(dev_priv, SVGA_REG_ENABLE) &
  361.                           ~SVGA_REG_ENABLE_HIDE);
  362.                 mutex_unlock(&dev_priv->hw_mutex);
  363.         }
  364.  
  365.         mutex_unlock(&dev_priv->release_mutex);
  366.     LEAVE();
  367.         return ret;
  368. }
  369.  
  370. /**
  371.  * Decrease the 3d resource refcount.
  372.  * If the count reaches zero, disable the fifo, switching to vga mode.
  373.  * Note that the master holds a refcount as well, and may request an
  374.  * explicit switch to vga mode when it releases its refcount to account
  375.  * for the situation of an X server vt switch to VGA with 3d resources
  376.  * active.
  377.  */
  378. void vmw_3d_resource_dec(struct vmw_private *dev_priv,
  379.                          bool hide_svga)
  380. {
  381.         int32_t n3d;
  382.  
  383.         mutex_lock(&dev_priv->release_mutex);
  384.         if (unlikely(--dev_priv->num_3d_resources == 0))
  385.                 vmw_release_device(dev_priv);
  386.         else if (hide_svga) {
  387.                 mutex_lock(&dev_priv->hw_mutex);
  388.                 vmw_write(dev_priv, SVGA_REG_ENABLE,
  389.                           vmw_read(dev_priv, SVGA_REG_ENABLE) |
  390.                           SVGA_REG_ENABLE_HIDE);
  391.                 mutex_unlock(&dev_priv->hw_mutex);
  392.         }
  393.  
  394.         n3d = (int32_t) dev_priv->num_3d_resources;
  395.         mutex_unlock(&dev_priv->release_mutex);
  396.  
  397.         BUG_ON(n3d < 0);
  398. }
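
/*
 * Usage sketch (hypothetical caller): hold a 3d resource reference around
 * command submission so the device stays in svga mode.
 *
 *     if (vmw_3d_resource_inc(dev_priv, false) == 0) {
 *             ... emit fifo commands ...
 *             vmw_3d_resource_dec(dev_priv, false);
 *     }
 */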
  399.  
  400. /**
  401.  * Sets the initial_[width|height] fields on the given vmw_private.
  402.  *
  403.  * It does so by reading the SVGA_REG_[WIDTH|HEIGHT] registers and then
  404.  * clamping them against the fb_max_[width|height] limits and the
  405.  * VMW_MIN_INITIAL_[WIDTH|HEIGHT] minimums.
  406.  * If the values appear to be invalid, they are set to
  407.  * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
  408.  */
  409. static void vmw_get_initial_size(struct vmw_private *dev_priv)
  410. {
  411.         uint32_t width;
  412.         uint32_t height;
  413.  
  414.         width = vmw_read(dev_priv, SVGA_REG_WIDTH);
  415.         height = vmw_read(dev_priv, SVGA_REG_HEIGHT);
  416.  
  417.         width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
  418.         height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);
  419.  
  420.         if (width > dev_priv->fb_max_width ||
  421.             height > dev_priv->fb_max_height) {
  422.  
  423.                 /*
  424.                  * This is a host error and shouldn't occur.
  425.                  */
  426.  
  427.                 width = VMW_MIN_INITIAL_WIDTH;
  428.                 height = VMW_MIN_INITIAL_HEIGHT;
  429.         }
  430.  
  431.         dev_priv->initial_width = width;
  432.         dev_priv->initial_height = height;
  433. }
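
/*
 * Example: if the host reports SVGA_REG_WIDTH = 640 and SVGA_REG_HEIGHT = 480,
 * both values are clamped up to the 800x600 minimum; if the reported size
 * exceeds fb_max_width/fb_max_height, the function also falls back to 800x600.
 */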
  434.  
  435. static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
  436. {
  437.         struct vmw_private *dev_priv;
  438.         int ret;
  439.         uint32_t svga_id;
  440.         enum vmw_res_type i;
  441.  
  442.     ENTER();
  443.  
  444.         dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
  445.         if (unlikely(dev_priv == NULL)) {
  446.                 DRM_ERROR("Failed allocating a device private struct.\n");
  447.                 return -ENOMEM;
  448.         }
  449.  
  450.         pci_set_master(dev->pdev);
  451.  
  452.         dev_priv->dev = dev;
  453.         dev_priv->vmw_chipset = chipset;
  454.         dev_priv->last_read_seqno = (uint32_t) -100;
  455.         mutex_init(&dev_priv->hw_mutex);
  456.         mutex_init(&dev_priv->cmdbuf_mutex);
  457.         mutex_init(&dev_priv->release_mutex);
  458.         rwlock_init(&dev_priv->resource_lock);
  459.  
  460.         for (i = vmw_res_context; i < vmw_res_max; ++i) {
  461.                 idr_init(&dev_priv->res_idr[i]);
  462.                 INIT_LIST_HEAD(&dev_priv->res_lru[i]);
  463.         }
  464.  
  465.         mutex_init(&dev_priv->init_mutex);
  466.         init_waitqueue_head(&dev_priv->fence_queue);
  467.         init_waitqueue_head(&dev_priv->fifo_queue);
  468.         dev_priv->fence_queue_waiters = 0;
  469.         atomic_set(&dev_priv->fifo_queue_waiters, 0);
  470.  
  471.         dev_priv->used_memory_size = 0;
  472.  
  473.         dev_priv->io_start = pci_resource_start(dev->pdev, 0);
  474.         dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
  475.         dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
  476.  
  477.     printk("io: %x vram: %x mmio: %x\n",dev_priv->io_start,
  478.             dev_priv->vram_start,dev_priv->mmio_start);
  479.  
  480.         dev_priv->enable_fb = enable_fbdev;
  481.  
  482.         mutex_lock(&dev_priv->hw_mutex);
  483.  
  484.     vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
  485.         svga_id = vmw_read(dev_priv, SVGA_REG_ID);
  486.         if (svga_id != SVGA_ID_2) {
  487.                 ret = -ENOSYS;
  488.                 DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
  489.                 mutex_unlock(&dev_priv->hw_mutex);
  490.                 goto out_err0;
  491.         }
  492.  
  493.         dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
  494.  
  495.         dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
  496.         dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
  497.         dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
  498.         dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
  499.  
  500.         vmw_get_initial_size(dev_priv);
  501.  
  502.         if (dev_priv->capabilities & SVGA_CAP_GMR) {
  503.                 dev_priv->max_gmr_descriptors =
  504.                         vmw_read(dev_priv,
  505.                                  SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
  506.                 dev_priv->max_gmr_ids =
  507.                         vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
  508.         }
  509.         if (dev_priv->capabilities & SVGA_CAP_GMR2) {
  510.                 dev_priv->max_gmr_pages =
  511.                         vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
  512.                 dev_priv->memory_size =
  513.                         vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
  514.                 dev_priv->memory_size -= dev_priv->vram_size;
  515.         } else {
  516.                 /*
  517.                  * An arbitrary limit of 512MiB on surface
  518.                  * memory. But all HWV8 hardware supports GMR2.
  519.                  */
  520.                 dev_priv->memory_size = 512*1024*1024;
  521.         }
  522.  
  523.         mutex_unlock(&dev_priv->hw_mutex);
  524.  
  525.         vmw_print_capabilities(dev_priv->capabilities);
  526.  
  527.         if (dev_priv->capabilities & SVGA_CAP_GMR) {
  528.                 DRM_INFO("Max GMR ids is %u\n",
  529.                          (unsigned)dev_priv->max_gmr_ids);
  530.                 DRM_INFO("Max GMR descriptors is %u\n",
  531.                          (unsigned)dev_priv->max_gmr_descriptors);
  532.         }
  533.         if (dev_priv->capabilities & SVGA_CAP_GMR2) {
  534.                 DRM_INFO("Max number of GMR pages is %u\n",
  535.                          (unsigned)dev_priv->max_gmr_pages);
  536.                 DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
  537.                          (unsigned)dev_priv->memory_size / 1024);
  538.         }
  539.         DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
  540.                  dev_priv->vram_start, dev_priv->vram_size / 1024);
  541.         DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
  542.                  dev_priv->mmio_start, dev_priv->mmio_size / 1024);
  543.  
  544.         ret = vmw_ttm_global_init(dev_priv);
  545.         if (unlikely(ret != 0))
  546.                 goto out_err0;
  547.  
  548.  
  549.  
  550.  
  551.         ret = ttm_bo_device_init(&dev_priv->bdev,
  552.                                  dev_priv->bo_global_ref.ref.object,
  553.                                  &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
  554.                                  false);
  555.         if (unlikely(ret != 0)) {
  556.                 DRM_ERROR("Failed initializing TTM buffer object driver.\n");
  557.                 goto out_err1;
  558.         }
  559.  
  560.         ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
  561.                              (dev_priv->vram_size >> PAGE_SHIFT));
  562.         if (unlikely(ret != 0)) {
  563.                 DRM_ERROR("Failed initializing memory manager for VRAM.\n");
  564.                 goto out_err2;
  565.         }
  566.  
  567.         dev_priv->has_gmr = true;
  568.         if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
  569.                            dev_priv->max_gmr_ids) != 0) {
  570.                 DRM_INFO("No GMR memory available. "
  571.                          "Graphics memory resources are very limited.\n");
  572.                 dev_priv->has_gmr = false;
  573.         }
  574.  
  575.         dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
  576.                                          dev_priv->mmio_size);
  577.  
  578.         if (unlikely(dev_priv->mmio_virt == NULL)) {
  579.                 ret = -ENOMEM;
  580.                 DRM_ERROR("Failed mapping MMIO.\n");
  581.                 goto out_err3;
  582.         }
  583.  
  584.         /* Need mmio memory to check for fifo pitchlock cap. */
  585.         if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
  586.             !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
  587.             !vmw_fifo_have_pitchlock(dev_priv)) {
  588.                 ret = -ENOSYS;
  589.                 DRM_ERROR("Hardware has no pitchlock\n");
  590.                 goto out_err4;
  591.         }
  592.  
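        /* 12 is the hash order, i.e. a 2^12-bucket hash table for ttm objects. */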
  593.         dev_priv->tdev = ttm_object_device_init
  594.             (dev_priv->mem_global_ref.object, 12);
  595.  
  596.         if (unlikely(dev_priv->tdev == NULL)) {
  597.                 DRM_ERROR("Unable to initialize TTM object management.\n");
  598.                 ret = -ENOMEM;
  599.                 goto out_err4;
  600.         }
  601.  
  602.         dev->dev_private = dev_priv;
  603.  
  604. #if 0
  605.  
  606.         if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
  607.                 ret = drm_irq_install(dev);
  608.                 if (ret != 0) {
  609.                         DRM_ERROR("Failed installing irq: %d\n", ret);
  610.                         goto out_no_irq;
  611.                 }
  612.         }
  613.  
  614.         dev_priv->fman = vmw_fence_manager_init(dev_priv);
  615.         if (unlikely(dev_priv->fman == NULL)) {
  616.                 ret = -ENOMEM;
  617.                 goto out_no_fman;
  618.         }
  619.  
  620.         vmw_kms_save_vga(dev_priv);
  621. #endif
  622.  
  623.         /* Start kms and overlay systems, needs fifo. */
  624.         ret = vmw_kms_init(dev_priv);
  625.         if (unlikely(ret != 0))
  626.                 goto out_no_kms;
  627.  
  628.     if (dev_priv->enable_fb) {
  629.        ret = vmw_3d_resource_inc(dev_priv, true);
  630.        if (unlikely(ret != 0))
  631.            goto out_no_fifo;
  632. //       vmw_fb_init(dev_priv);
  633.     }
  634.  
  635.     main_device = dev;
  636.  
  637.     LEAVE();
  638.         return 0;
  639.  
  640. out_no_fifo:
  641. //   vmw_overlay_close(dev_priv);
  642. //   vmw_kms_close(dev_priv);
  643. out_no_kms:
  644. //   vmw_kms_restore_vga(dev_priv);
  645. //   vmw_fence_manager_takedown(dev_priv->fman);
  646. out_no_fman:
  647. //   if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
  648. //       drm_irq_uninstall(dev_priv->dev);
  649. out_no_irq:
  650. //   if (dev_priv->stealth)
  651. //       pci_release_region(dev->pdev, 2);
  652. //   else
  653. //       pci_release_regions(dev->pdev);
  654. out_no_device:
  655. //   ttm_object_device_release(&dev_priv->tdev);
  656. out_err4:
  657. //   iounmap(dev_priv->mmio_virt);
  658. out_err3:
  659. //   arch_phys_wc_del(dev_priv->mmio_mtrr);
  660. //   if (dev_priv->has_gmr)
  661. //       (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
  662. //   (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
  663. out_err2:
  664. //   (void)ttm_bo_device_release(&dev_priv->bdev);
  665. out_err1:
  666. //   vmw_ttm_global_release(dev_priv);
  667. out_err0:
  668. //   for (i = vmw_res_context; i < vmw_res_max; ++i)
  669. //       idr_destroy(&dev_priv->res_idr[i]);
  670.  
  671.         kfree(dev_priv);
  672.         return ret;
  673. }
  674.  
  675. #if 0
  676. static int vmw_driver_unload(struct drm_device *dev)
  677. {
  678.         struct vmw_private *dev_priv = vmw_priv(dev);
  679.         enum vmw_res_type i;
  680.  
  681.         unregister_pm_notifier(&dev_priv->pm_nb);
  682.  
  683.         if (dev_priv->ctx.res_ht_initialized)
  684.                 drm_ht_remove(&dev_priv->ctx.res_ht);
  685.         if (dev_priv->ctx.cmd_bounce)
  686.                 vfree(dev_priv->ctx.cmd_bounce);
  687.         if (dev_priv->enable_fb) {
  688.                 vmw_fb_close(dev_priv);
  689.                 vmw_kms_restore_vga(dev_priv);
  690.                 vmw_3d_resource_dec(dev_priv, false);
  691.         }
  692.         vmw_kms_close(dev_priv);
  693.         vmw_overlay_close(dev_priv);
  694.         vmw_fence_manager_takedown(dev_priv->fman);
  695.         if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
  696.                 drm_irq_uninstall(dev_priv->dev);
  697.         if (dev_priv->stealth)
  698.                 pci_release_region(dev->pdev, 2);
  699.         else
  700.                 pci_release_regions(dev->pdev);
  701.  
  702.         ttm_object_device_release(&dev_priv->tdev);
  703.         iounmap(dev_priv->mmio_virt);
  704.         arch_phys_wc_del(dev_priv->mmio_mtrr);
  705.         if (dev_priv->has_gmr)
  706.                 (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
  707.         (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
  708.         (void)ttm_bo_device_release(&dev_priv->bdev);
  709.         vmw_ttm_global_release(dev_priv);
  710.  
  711.         for (i = vmw_res_context; i < vmw_res_max; ++i)
  712.                 idr_destroy(&dev_priv->res_idr[i]);
  713.  
  714.         kfree(dev_priv);
  715.  
  716.         return 0;
  717. }
  718.  
  719. static void vmw_preclose(struct drm_device *dev,
  720.                          struct drm_file *file_priv)
  721. {
  722.         struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
  723.         struct vmw_private *dev_priv = vmw_priv(dev);
  724.  
  725.         vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
  726. }
  727.  
  728. static void vmw_postclose(struct drm_device *dev,
  729.                          struct drm_file *file_priv)
  730. {
  731.         struct vmw_fpriv *vmw_fp;
  732.  
  733.         vmw_fp = vmw_fpriv(file_priv);
  734.         ttm_object_file_release(&vmw_fp->tfile);
  735.         if (vmw_fp->locked_master)
  736.                 drm_master_put(&vmw_fp->locked_master);
  737.         kfree(vmw_fp);
  738. }
  739. #endif
  740.  
  741. static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
  742. {
  743.         struct vmw_private *dev_priv = vmw_priv(dev);
  744.         struct vmw_fpriv *vmw_fp;
  745.         int ret = -ENOMEM;
  746.  
  747.         vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
  748.         if (unlikely(vmw_fp == NULL))
  749.                 return ret;
  750.  
  751.         INIT_LIST_HEAD(&vmw_fp->fence_events);
  752. //   vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
  753. //   if (unlikely(vmw_fp->tfile == NULL))
  754. //       goto out_no_tfile;
  755.  
  756.         file_priv->driver_priv = vmw_fp;
  757. //   dev_priv->bdev.dev_mapping = dev->dev_mapping;
  758.  
  759.         return 0;
  760.  
  761. out_no_tfile:
  762.         kfree(vmw_fp);
  763.         return ret;
  764. }
  765.  
  766. #if 0
  767. static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
  768.                                unsigned long arg)
  769. {
  770.         struct drm_file *file_priv = filp->private_data;
  771.         struct drm_device *dev = file_priv->minor->dev;
  772.         unsigned int nr = DRM_IOCTL_NR(cmd);
  773.  
  774.         /*
  775.          * Do extra checking on driver private ioctls.
  776.          */
  777.  
  778.         if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
  779.             && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
  780.                 const struct drm_ioctl_desc *ioctl =
  781.                     &vmw_ioctls[nr - DRM_COMMAND_BASE];
  782.  
  783.                 if (unlikely(ioctl->cmd_drv != cmd)) {
  784.                         DRM_ERROR("Invalid command format, ioctl %d\n",
  785.                                   nr - DRM_COMMAND_BASE);
  786.                         return -EINVAL;
  787.                 }
  788.         }
  789.  
  790.         return drm_ioctl(filp, cmd, arg);
  791. }
  792.  
  793. static void vmw_lastclose(struct drm_device *dev)
  794. {
  795.         struct drm_crtc *crtc;
  796.         struct drm_mode_set set;
  797.         int ret;
  798.  
  799.         set.x = 0;
  800.         set.y = 0;
  801.         set.fb = NULL;
  802.         set.mode = NULL;
  803.         set.connectors = NULL;
  804.         set.num_connectors = 0;
  805.  
  806.         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
  807.                 set.crtc = crtc;
  808.                 ret = drm_mode_set_config_internal(&set);
  809.                 WARN_ON(ret != 0);
  810.         }
  811.  
  812. }
  813.  
  814. static void vmw_master_init(struct vmw_master *vmaster)
  815. {
  816.         ttm_lock_init(&vmaster->lock);
  817.         INIT_LIST_HEAD(&vmaster->fb_surf);
  818.         mutex_init(&vmaster->fb_surf_mutex);
  819. }
  820.  
  821. static int vmw_master_create(struct drm_device *dev,
  822.                              struct drm_master *master)
  823. {
  824.         struct vmw_master *vmaster;
  825.  
  826.         vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
  827.         if (unlikely(vmaster == NULL))
  828.                 return -ENOMEM;
  829.  
  830.         vmw_master_init(vmaster);
  831.         ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
  832.         master->driver_priv = vmaster;
  833.  
  834.         return 0;
  835. }
  836.  
  837. static void vmw_master_destroy(struct drm_device *dev,
  838.                                struct drm_master *master)
  839. {
  840.         struct vmw_master *vmaster = vmw_master(master);
  841.  
  842.         master->driver_priv = NULL;
  843.         kfree(vmaster);
  844. }
  845.  
  846.  
  847. static int vmw_master_set(struct drm_device *dev,
  848.                           struct drm_file *file_priv,
  849.                           bool from_open)
  850. {
  851.         struct vmw_private *dev_priv = vmw_priv(dev);
  852.         struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
  853.         struct vmw_master *active = dev_priv->active_master;
  854.         struct vmw_master *vmaster = vmw_master(file_priv->master);
  855.         int ret = 0;
  856.  
  857.         if (!dev_priv->enable_fb) {
  858.                 ret = vmw_3d_resource_inc(dev_priv, true);
  859.                 if (unlikely(ret != 0))
  860.                         return ret;
  861.                 vmw_kms_save_vga(dev_priv);
  862.                 mutex_lock(&dev_priv->hw_mutex);
  863.                 vmw_write(dev_priv, SVGA_REG_TRACES, 0);
  864.                 mutex_unlock(&dev_priv->hw_mutex);
  865.         }
  866.  
  867.         if (active) {
  868.                 BUG_ON(active != &dev_priv->fbdev_master);
  869.                 ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
  870.                 if (unlikely(ret != 0))
  871.                         goto out_no_active_lock;
  872.  
  873.                 ttm_lock_set_kill(&active->lock, true, SIGTERM);
  874.                 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
  875.                 if (unlikely(ret != 0)) {
  876.                         DRM_ERROR("Unable to clean VRAM on "
  877.                                   "master drop.\n");
  878.                 }
  879.  
  880.                 dev_priv->active_master = NULL;
  881.         }
  882.  
  883.         ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
  884.         if (!from_open) {
  885.                 ttm_vt_unlock(&vmaster->lock);
  886.                 BUG_ON(vmw_fp->locked_master != file_priv->master);
  887.                 drm_master_put(&vmw_fp->locked_master);
  888.         }
  889.  
  890.         dev_priv->active_master = vmaster;
  891.  
  892.         return 0;
  893.  
  894. out_no_active_lock:
  895.         if (!dev_priv->enable_fb) {
  896.                 vmw_kms_restore_vga(dev_priv);
  897.                 vmw_3d_resource_dec(dev_priv, true);
  898.                 mutex_lock(&dev_priv->hw_mutex);
  899.                 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
  900.                 mutex_unlock(&dev_priv->hw_mutex);
  901.         }
  902.         return ret;
  903. }
  904.  
  905. static void vmw_master_drop(struct drm_device *dev,
  906.                             struct drm_file *file_priv,
  907.                             bool from_release)
  908. {
  909.         struct vmw_private *dev_priv = vmw_priv(dev);
  910.         struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
  911.         struct vmw_master *vmaster = vmw_master(file_priv->master);
  912.         int ret;
  913.  
  914.         /**
  915.          * Make sure the master doesn't disappear while we have
  916.          * it locked.
  917.          */
  918.  
  919.         vmw_fp->locked_master = drm_master_get(file_priv->master);
  920.         ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
  921.         vmw_execbuf_release_pinned_bo(dev_priv);
  922.  
  923.         if (unlikely((ret != 0))) {
  924.                 DRM_ERROR("Unable to lock TTM at VT switch.\n");
  925.                 drm_master_put(&vmw_fp->locked_master);
  926.         }
  927.  
  928.         ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
  929.  
  930.         if (!dev_priv->enable_fb) {
  931.                 ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
  932.                 if (unlikely(ret != 0))
  933.                         DRM_ERROR("Unable to clean VRAM on master drop.\n");
  934.                 vmw_kms_restore_vga(dev_priv);
  935.                 vmw_3d_resource_dec(dev_priv, true);
  936.                 mutex_lock(&dev_priv->hw_mutex);
  937.                 vmw_write(dev_priv, SVGA_REG_TRACES, 1);
  938.                 mutex_unlock(&dev_priv->hw_mutex);
  939.         }
  940.  
  941.         dev_priv->active_master = &dev_priv->fbdev_master;
  942.         ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
  943.         ttm_vt_unlock(&dev_priv->fbdev_master.lock);
  944.  
  945.         if (dev_priv->enable_fb)
  946.                 vmw_fb_on(dev_priv);
  947. }
  948.  
  949.  
  950. static void vmw_remove(struct pci_dev *pdev)
  951. {
  952.         struct drm_device *dev = pci_get_drvdata(pdev);
  953.  
  954.         drm_put_dev(dev);
  955. }
  956.  
  957. static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
  958.                               void *ptr)
  959. {
  960.         struct vmw_private *dev_priv =
  961.                 container_of(nb, struct vmw_private, pm_nb);
  962.         struct vmw_master *vmaster = dev_priv->active_master;
  963.  
  964.         switch (val) {
  965.         case PM_HIBERNATION_PREPARE:
  966.         case PM_SUSPEND_PREPARE:
  967.                 ttm_suspend_lock(&vmaster->lock);
  968.  
  969.                 /**
  970.                  * This empties VRAM and unbinds all GMR bindings.
  971.                  * Buffer contents are moved to swappable memory.
  972.                  */
  973.                 vmw_execbuf_release_pinned_bo(dev_priv);
  974.                 vmw_resource_evict_all(dev_priv);
  975.                 ttm_bo_swapout_all(&dev_priv->bdev);
  976.  
  977.                 break;
  978.         case PM_POST_HIBERNATION:
  979.         case PM_POST_SUSPEND:
  980.         case PM_POST_RESTORE:
  981.                 ttm_suspend_unlock(&vmaster->lock);
  982.  
  983.                 break;
  984.         case PM_RESTORE_PREPARE:
  985.                 break;
  986.         default:
  987.                 break;
  988.         }
  989.         return 0;
  990. }
  991.  
  992. /**
  993.  * These might not be needed with the virtual SVGA device.
  994.  */
  995.  
  996. static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
  997. {
  998.         struct drm_device *dev = pci_get_drvdata(pdev);
  999.         struct vmw_private *dev_priv = vmw_priv(dev);
  1000.  
  1001.         if (dev_priv->num_3d_resources != 0) {
  1002.                 DRM_INFO("Can't suspend or hibernate "
  1003.                          "while 3D resources are active.\n");
  1004.                 return -EBUSY;
  1005.         }
  1006.  
  1007.         pci_save_state(pdev);
  1008.         pci_disable_device(pdev);
  1009.         pci_set_power_state(pdev, PCI_D3hot);
  1010.         return 0;
  1011. }
  1012.  
  1013. static int vmw_pci_resume(struct pci_dev *pdev)
  1014. {
  1015.         pci_set_power_state(pdev, PCI_D0);
  1016.         pci_restore_state(pdev);
  1017.         return pci_enable_device(pdev);
  1018. }
  1019.  
  1020. static int vmw_pm_suspend(struct device *kdev)
  1021. {
  1022.         struct pci_dev *pdev = to_pci_dev(kdev);
  1023.         struct pm_message dummy;
  1024.  
  1025.         dummy.event = 0;
  1026.  
  1027.         return vmw_pci_suspend(pdev, dummy);
  1028. }
  1029.  
  1030. static int vmw_pm_resume(struct device *kdev)
  1031. {
  1032.         struct pci_dev *pdev = to_pci_dev(kdev);
  1033.  
  1034.         return vmw_pci_resume(pdev);
  1035. }
  1036.  
  1037. static int vmw_pm_prepare(struct device *kdev)
  1038. {
  1039.         struct pci_dev *pdev = to_pci_dev(kdev);
  1040.         struct drm_device *dev = pci_get_drvdata(pdev);
  1041.         struct vmw_private *dev_priv = vmw_priv(dev);
  1042.  
  1043.         /**
  1044.          * Release 3d reference held by fbdev and potentially
  1045.          * stop fifo.
  1046.          */
  1047.         dev_priv->suspended = true;
  1048.         if (dev_priv->enable_fb)
  1049.                         vmw_3d_resource_dec(dev_priv, true);
  1050.  
  1051.         if (dev_priv->num_3d_resources != 0) {
  1052.  
  1053.                 DRM_INFO("Can't suspend or hibernate "
  1054.                          "while 3D resources are active.\n");
  1055.  
  1056.                 if (dev_priv->enable_fb)
  1057.                         vmw_3d_resource_inc(dev_priv, true);
  1058.                 dev_priv->suspended = false;
  1059.                 return -EBUSY;
  1060.         }
  1061.  
  1062.         return 0;
  1063. }
  1064.  
  1065. #endif
  1066.  
  1067.  
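/*
 * DRIVER_MODESET marks this as a kernel modesetting driver; DRIVER_HAVE_IRQ
 * and DRIVER_IRQ_SHARED let the DRM core install the (shared) interrupt
 * handler through the irq_* hooks below.
 */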
  1068. static struct drm_driver driver = {
  1069.         .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
  1070.         DRIVER_MODESET,
  1071.    .load = vmw_driver_load,
  1072. //   .unload = vmw_driver_unload,
  1073. //   .firstopen = vmw_firstopen,
  1074. //   .lastclose = vmw_lastclose,
  1075.    .irq_preinstall = vmw_irq_preinstall,
  1076.    .irq_postinstall = vmw_irq_postinstall,
  1077. //   .irq_uninstall = vmw_irq_uninstall,
  1078.    .irq_handler = vmw_irq_handler,
  1079. //   .get_vblank_counter = vmw_get_vblank_counter,
  1080. //   .enable_vblank = vmw_enable_vblank,
  1081. //   .disable_vblank = vmw_disable_vblank,
  1082. //   .ioctls = vmw_ioctls,
  1083. //   .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
  1084. //   .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
  1085. //   .master_create = vmw_master_create,
  1086. //   .master_destroy = vmw_master_destroy,
  1087. //   .master_set = vmw_master_set,
  1088. //   .master_drop = vmw_master_drop,
  1089.      .open = vmw_driver_open,
  1090. //   .preclose = vmw_preclose,
  1091. //   .postclose = vmw_postclose,
  1092.  
  1093. //   .dumb_create = vmw_dumb_create,
  1094. //   .dumb_map_offset = vmw_dumb_map_offset,
  1095. //   .dumb_destroy = vmw_dumb_destroy,
  1096.  
  1097. //   .fops = &vmwgfx_driver_fops,
  1098. //   .name = VMWGFX_DRIVER_NAME,
  1099. //   .desc = VMWGFX_DRIVER_DESC,
  1100. //   .date = VMWGFX_DRIVER_DATE,
  1101. //   .major = VMWGFX_DRIVER_MAJOR,
  1102. //   .minor = VMWGFX_DRIVER_MINOR,
  1103. //   .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
  1104. };
  1105.  
  1106. #if 0
  1107. static struct pci_driver vmw_pci_driver = {
  1108.         .name = VMWGFX_DRIVER_NAME,
  1109.         .id_table = vmw_pci_id_list,
  1110.         .probe = vmw_probe,
  1111.         .remove = vmw_remove,
  1112.         .driver = {
  1113.                 .pm = &vmw_pm_ops
  1114.         }
  1115. };
  1116.  
  1117. static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  1118. {
  1119.         return drm_get_pci_dev(pdev, ent, &driver);
  1120. }
  1121. #endif
  1122.  
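/*
 * KolibriOS entry point: instead of registering a pci_driver, this port scans
 * the PCI bus for a matching device itself and hands it straight to the DRM
 * core via drm_get_pci_dev().
 */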
  1123. int vmw_init(void)
  1124. {
  1125.     static pci_dev_t device;
  1126.     const struct pci_device_id  *ent;
  1127.     int  err;
  1128.  
  1129.     ENTER();
  1130.  
  1131.     ent = find_pci_device(&device, vmw_pci_id_list);
  1132.     if( unlikely(ent == NULL) )
  1133.     {
  1134.         dbgprintf("device not found\n");
  1135.         return -ENODEV;
  1136.     };
  1137.  
  1138.     drm_core_init();
  1139.  
  1140.     DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
  1141.                                 device.pci_dev.device);
  1142.  
  1143.     err = drm_get_pci_dev(&device.pci_dev, ent, &driver);
  1144.     LEAVE();
  1145.  
  1146.     return err;
  1147. }
  1148.  
  1149.  
  1150. MODULE_AUTHOR("VMware Inc. and others");
  1151. MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
  1152. MODULE_LICENSE("GPL and additional rights");
  1153.