Subversion Repositories Kolibri OS


/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <linux/module.h>

#include <drm/drmP.h>
#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_object.h>
//#include <drm/ttm/ttm_module.h>

#define VMWGFX_DRIVER_NAME "vmwgfx"
#define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices"
#define VMWGFX_CHIP_SVGAII 0
#define VMW_FB_RESERVATION 0

#define VMW_MIN_INITIAL_WIDTH 800
#define VMW_MIN_INITIAL_HEIGHT 600

struct drm_device *main_device;

struct drm_file *drm_file_handlers[256];

#if 0
/**
 * Fully encoded drm commands. Might move to vmw_drm.h
 */

#define DRM_IOCTL_VMW_GET_PARAM                                 \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_GET_PARAM,          \
                 struct drm_vmw_getparam_arg)
#define DRM_IOCTL_VMW_ALLOC_DMABUF                              \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_ALLOC_DMABUF,       \
                union drm_vmw_alloc_dmabuf_arg)
#define DRM_IOCTL_VMW_UNREF_DMABUF                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_DMABUF,        \
                struct drm_vmw_unref_dmabuf_arg)
#define DRM_IOCTL_VMW_CURSOR_BYPASS                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CURSOR_BYPASS,       \
                 struct drm_vmw_cursor_bypass_arg)

#define DRM_IOCTL_VMW_CONTROL_STREAM                            \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_CONTROL_STREAM,      \
                 struct drm_vmw_control_stream_arg)
#define DRM_IOCTL_VMW_CLAIM_STREAM                              \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CLAIM_STREAM,        \
                 struct drm_vmw_stream_arg)
#define DRM_IOCTL_VMW_UNREF_STREAM                              \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_STREAM,        \
                 struct drm_vmw_stream_arg)

#define DRM_IOCTL_VMW_CREATE_CONTEXT                            \
        DRM_IOR(DRM_COMMAND_BASE + DRM_VMW_CREATE_CONTEXT,      \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_UNREF_CONTEXT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_CONTEXT,       \
                struct drm_vmw_context_arg)
#define DRM_IOCTL_VMW_CREATE_SURFACE                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_SURFACE,     \
                 union drm_vmw_surface_create_arg)
#define DRM_IOCTL_VMW_UNREF_SURFACE                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UNREF_SURFACE,       \
                 struct drm_vmw_surface_arg)
#define DRM_IOCTL_VMW_REF_SURFACE                               \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_REF_SURFACE,        \
                 union drm_vmw_surface_reference_arg)
#define DRM_IOCTL_VMW_EXECBUF                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_EXECBUF,             \
                struct drm_vmw_execbuf_arg)
#define DRM_IOCTL_VMW_GET_3D_CAP                                \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_GET_3D_CAP,          \
                 struct drm_vmw_get_3d_cap_arg)
#define DRM_IOCTL_VMW_FENCE_WAIT                                \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_WAIT,         \
                 struct drm_vmw_fence_wait_arg)
#define DRM_IOCTL_VMW_FENCE_SIGNALED                            \
        DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_FENCE_SIGNALED,     \
                 struct drm_vmw_fence_signaled_arg)
#define DRM_IOCTL_VMW_FENCE_UNREF                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,         \
                 struct drm_vmw_fence_arg)
#define DRM_IOCTL_VMW_FENCE_EVENT                               \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_EVENT,         \
                 struct drm_vmw_fence_event_arg)
#define DRM_IOCTL_VMW_PRESENT                                   \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,             \
                 struct drm_vmw_present_arg)
#define DRM_IOCTL_VMW_PRESENT_READBACK                          \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,    \
                 struct drm_vmw_present_readback_arg)
#define DRM_IOCTL_VMW_UPDATE_LAYOUT                             \
        DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT,       \
                 struct drm_vmw_update_layout_arg)

/**
 * The core DRM version of this macro doesn't account for
 * DRM_COMMAND_BASE.
 */

#define VMW_IOCTL_DEF(ioctl, func, flags) \
  [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = {DRM_##ioctl, flags, func, DRM_IOCTL_##ioctl}
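
/*
 * Illustrative expansion (added for clarity; not part of the original
 * source): an entry such as
 *
 *   VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl, DRM_AUTH | DRM_UNLOCKED)
 *
 * expands, per the macro above, to a designated initializer of the form
 *
 *   [DRM_IOCTL_NR(DRM_IOCTL_VMW_GET_PARAM) - DRM_COMMAND_BASE] =
 *           {DRM_VMW_GET_PARAM, DRM_AUTH | DRM_UNLOCKED,
 *            vmw_getparam_ioctl, DRM_IOCTL_VMW_GET_PARAM},
 *
 * i.e. the table below is indexed by the driver-private ioctl number with
 * DRM_COMMAND_BASE subtracted out.
 */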

/**
 * Ioctl definitions.
 */

static struct drm_ioctl_desc vmw_ioctls[] = {
        VMW_IOCTL_DEF(VMW_GET_PARAM, vmw_getparam_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CURSOR_BYPASS,
                      vmw_kms_cursor_bypass_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CONTROL_STREAM, vmw_overlay_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
                      DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),

        VMW_IOCTL_DEF(VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_SIGNALED,
                      vmw_fence_obj_signaled_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_UNREF, vmw_fence_obj_unref_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_FENCE_EVENT,
                      vmw_fence_event_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
                      DRM_AUTH | DRM_UNLOCKED),

        /* These allow direct access to the framebuffers; mark them master only. */
        VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
                      vmw_present_readback_ioctl,
                      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
        VMW_IOCTL_DEF(VMW_UPDATE_LAYOUT,
                      vmw_kms_update_layout_ioctl,
                      DRM_MASTER | DRM_UNLOCKED),
};
#endif

static struct pci_device_id vmw_pci_id_list[] = {
        {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII},
        {0, 0, 0}
};

static int enable_fbdev = 1;

static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
static void vmw_master_init(struct vmw_master *);

MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
module_param_named(enable_fbdev, enable_fbdev, int, 0600);
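
/*
 * Usage note (added for clarity; not part of the original source): on a
 * stock Linux build this parameter would be set at load time, e.g.
 *
 *   modprobe vmwgfx enable_fbdev=0
 *
 * or afterwards via /sys/module/vmwgfx/parameters/enable_fbdev (mode 0600
 * above). In this KolibriOS port the compiled-in default of 1 is what takes
 * effect unless the port supplies its own override mechanism.
 */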

static void vmw_print_capabilities(uint32_t capabilities)
{
        DRM_INFO("Capabilities:\n");
        if (capabilities & SVGA_CAP_RECT_COPY)
                DRM_INFO("  Rect copy.\n");
        if (capabilities & SVGA_CAP_CURSOR)
                DRM_INFO("  Cursor.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS)
                DRM_INFO("  Cursor bypass.\n");
        if (capabilities & SVGA_CAP_CURSOR_BYPASS_2)
                DRM_INFO("  Cursor bypass 2.\n");
        if (capabilities & SVGA_CAP_8BIT_EMULATION)
                DRM_INFO("  8bit emulation.\n");
        if (capabilities & SVGA_CAP_ALPHA_CURSOR)
                DRM_INFO("  Alpha cursor.\n");
        if (capabilities & SVGA_CAP_3D)
                DRM_INFO("  3D.\n");
        if (capabilities & SVGA_CAP_EXTENDED_FIFO)
                DRM_INFO("  Extended Fifo.\n");
        if (capabilities & SVGA_CAP_MULTIMON)
                DRM_INFO("  Multimon.\n");
        if (capabilities & SVGA_CAP_PITCHLOCK)
                DRM_INFO("  Pitchlock.\n");
        if (capabilities & SVGA_CAP_IRQMASK)
                DRM_INFO("  Irq mask.\n");
        if (capabilities & SVGA_CAP_DISPLAY_TOPOLOGY)
                DRM_INFO("  Display Topology.\n");
        if (capabilities & SVGA_CAP_GMR)
                DRM_INFO("  GMR.\n");
        if (capabilities & SVGA_CAP_TRACES)
                DRM_INFO("  Traces.\n");
        if (capabilities & SVGA_CAP_GMR2)
                DRM_INFO("  GMR2.\n");
        if (capabilities & SVGA_CAP_SCREEN_OBJECT_2)
                DRM_INFO("  Screen Object 2.\n");
}

/**
 * vmw_dummy_query_bo_prepare - Initialize a query result structure at
 * the start of a buffer object.
 *
 * @dev_priv: The device private structure.
 *
 * This function will idle the buffer using an uninterruptible wait, then
 * map the first page and initialize a pending occlusion query result
 * structure. Finally it will unmap the buffer.
 *
 * TODO: Since we're only mapping a single page, we should optimize the map
 * to use kmap_atomic / iomap_atomic.
 */
static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
{
        struct ttm_bo_kmap_obj map;
        volatile SVGA3dQueryResult *result;
        bool dummy;
        int ret;
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;

        ttm_bo_reserve(bo, false, false, false, 0);
        spin_lock(&bdev->fence_lock);
        ret = 0; //ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bdev->fence_lock);
        if (unlikely(ret != 0))
                (void) vmw_fallback_wait(dev_priv, false, true, 0, false,
                                         10*HZ);
/*
        ret = ttm_bo_kmap(bo, 0, 1, &map);
        if (likely(ret == 0)) {
                result = ttm_kmap_obj_virtual(&map, &dummy);
                result->totalSize = sizeof(*result);
                result->state = SVGA3D_QUERYSTATE_PENDING;
                result->result32 = 0xff;
                ttm_bo_kunmap(&map);
        } else
                DRM_ERROR("Dummy query buffer map failed.\n");
*/
        ttm_bo_unreserve(bo);
}


/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
        return ttm_bo_create(&dev_priv->bdev,
                             PAGE_SIZE,
                             ttm_bo_type_device,
                             &vmw_vram_sys_placement,
                             0, false, NULL,
                             &dev_priv->dummy_query_bo);
}


static int vmw_request_device(struct vmw_private *dev_priv)
{
        int ret;
        ENTER();

        ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to initialize FIFO.\n");
                return ret;
        }
//   vmw_fence_fifo_up(dev_priv->fman);
//   ret = vmw_dummy_query_bo_create(dev_priv);
//   if (unlikely(ret != 0))
//       goto out_no_query_bo;
//   vmw_dummy_query_bo_prepare(dev_priv);

        LEAVE();

        return 0;

out_no_query_bo:
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
        return ret;
}

static void vmw_release_device(struct vmw_private *dev_priv)
{
        /*
         * Previous destructions should've released
         * the pinned bo.
         */

        BUG_ON(dev_priv->pinned_bo != NULL);

        ttm_bo_unref(&dev_priv->dummy_query_bo);
        vmw_fence_fifo_down(dev_priv->fman);
        vmw_fifo_release(dev_priv, &dev_priv->fifo);
}

/**
 * Increase the 3d resource refcount.
 * If the count was previously zero, initialize the fifo, switching to svga
 * mode. Note that the master holds a ref as well, and may request an
 * explicit switch to svga mode if fb is not running, using @unhide_svga.
 */
int vmw_3d_resource_inc(struct vmw_private *dev_priv,
                        bool unhide_svga)
{
        int ret = 0;

        ENTER();

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(dev_priv->num_3d_resources++ == 0)) {
                ret = vmw_request_device(dev_priv);
                if (unlikely(ret != 0))
                        --dev_priv->num_3d_resources;
        } else if (unhide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) &
                          ~SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        mutex_unlock(&dev_priv->release_mutex);
        LEAVE();
        return ret;
}

/**
 * Decrease the 3d resource refcount.
 * If the count reaches zero, disable the fifo, switching to vga mode.
 * Note that the master holds a refcount as well, and may request an
 * explicit switch to vga mode when it releases its refcount to account
 * for the situation of an X server vt switch to VGA with 3d resources
 * active.
 */
void vmw_3d_resource_dec(struct vmw_private *dev_priv,
                         bool hide_svga)
{
        int32_t n3d;

        mutex_lock(&dev_priv->release_mutex);
        if (unlikely(--dev_priv->num_3d_resources == 0))
                vmw_release_device(dev_priv);
        else if (hide_svga) {
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_ENABLE,
                          vmw_read(dev_priv, SVGA_REG_ENABLE) |
                          SVGA_REG_ENABLE_HIDE);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        n3d = (int32_t) dev_priv->num_3d_resources;
        mutex_unlock(&dev_priv->release_mutex);

        BUG_ON(n3d < 0);
}
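
/*
 * Usage sketch (added for illustration; not part of the original source):
 * callers are expected to pair vmw_3d_resource_inc() and
 * vmw_3d_resource_dec(), as vmw_driver_load() does below for the fbdev path:
 *
 *   if (dev_priv->enable_fb) {
 *           ret = vmw_3d_resource_inc(dev_priv, true);   // first ref brings up the FIFO
 *           if (unlikely(ret != 0))
 *                   goto out_no_fifo;
 *           ...
 *   }
 *   ...
 *   vmw_3d_resource_dec(dev_priv, false);                // last ref releases the device
 *
 * The release_mutex serializes the refcount against concurrent bring-up and
 * teardown.
 */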

/**
 * Sets the initial_[width|height] fields on the given vmw_private.
 *
 * It does so by reading SVGA_REG_[WIDTH|HEIGHT] regs and then
 * clamping the value to fb_max_[width|height] fields and the
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 * If the values appear to be invalid, set them to
 * VMW_MIN_INITIAL_[WIDTH|HEIGHT].
 */
static void vmw_get_initial_size(struct vmw_private *dev_priv)
{
        uint32_t width;
        uint32_t height;

        width = vmw_read(dev_priv, SVGA_REG_WIDTH);
        height = vmw_read(dev_priv, SVGA_REG_HEIGHT);

        width = max_t(uint32_t, width, VMW_MIN_INITIAL_WIDTH);
        height = max_t(uint32_t, height, VMW_MIN_INITIAL_HEIGHT);

        if (width > dev_priv->fb_max_width ||
            height > dev_priv->fb_max_height) {

                /*
                 * This is a host error and shouldn't occur.
                 */

                width = VMW_MIN_INITIAL_WIDTH;
                height = VMW_MIN_INITIAL_HEIGHT;
        }

        dev_priv->initial_width = width;
        dev_priv->initial_height = height;
}
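
/*
 * Worked example (illustrative only): with VMW_MIN_INITIAL_WIDTH/HEIGHT of
 * 800x600, a host-reported 640x480 is clamped up to 800x600, a reported
 * 1280x1024 is kept as-is provided it fits within fb_max_width/height, and
 * any value exceeding fb_max_width/height falls back to 800x600.
 */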

static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
        struct vmw_private *dev_priv;
        int ret;
        uint32_t svga_id;
        enum vmw_res_type i;

        ENTER();

        dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
        if (unlikely(dev_priv == NULL)) {
                DRM_ERROR("Failed allocating a device private struct.\n");
                return -ENOMEM;
        }

        pci_set_master(dev->pdev);

        dev_priv->dev = dev;
        dev_priv->vmw_chipset = chipset;
        dev_priv->last_read_seqno = (uint32_t) -100;
        mutex_init(&dev_priv->hw_mutex);
        mutex_init(&dev_priv->cmdbuf_mutex);
        mutex_init(&dev_priv->release_mutex);
        rwlock_init(&dev_priv->resource_lock);

        for (i = vmw_res_context; i < vmw_res_max; ++i) {
                idr_init(&dev_priv->res_idr[i]);
                INIT_LIST_HEAD(&dev_priv->res_lru[i]);
        }

        mutex_init(&dev_priv->init_mutex);
        init_waitqueue_head(&dev_priv->fence_queue);
        init_waitqueue_head(&dev_priv->fifo_queue);
        dev_priv->fence_queue_waiters = 0;
        atomic_set(&dev_priv->fifo_queue_waiters, 0);

        dev_priv->used_memory_size = 0;

        dev_priv->io_start = pci_resource_start(dev->pdev, 0);
        dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
        dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

        printk("io: %x vram: %x mmio: %x\n", dev_priv->io_start,
               dev_priv->vram_start, dev_priv->mmio_start);

        dev_priv->enable_fb = enable_fbdev;

        mutex_lock(&dev_priv->hw_mutex);

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        svga_id = vmw_read(dev_priv, SVGA_REG_ID);
        if (svga_id != SVGA_ID_2) {
                ret = -ENOSYS;
                DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
                mutex_unlock(&dev_priv->hw_mutex);
                goto out_err0;
        }

        dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);

        dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
        dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
        dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
        dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

        vmw_get_initial_size(dev_priv);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                dev_priv->max_gmr_descriptors =
                        vmw_read(dev_priv,
                                 SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
                dev_priv->max_gmr_ids =
                        vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                dev_priv->max_gmr_pages =
                        vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
                dev_priv->memory_size =
                        vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
                dev_priv->memory_size -= dev_priv->vram_size;
        } else {
                /*
                 * An arbitrary limit of 512MiB on surface
                 * memory. But all HWV8 hardware supports GMR2.
                 */
                dev_priv->memory_size = 512*1024*1024;
        }

        mutex_unlock(&dev_priv->hw_mutex);

        vmw_print_capabilities(dev_priv->capabilities);

        if (dev_priv->capabilities & SVGA_CAP_GMR) {
                DRM_INFO("Max GMR ids is %u\n",
                         (unsigned)dev_priv->max_gmr_ids);
                DRM_INFO("Max GMR descriptors is %u\n",
                         (unsigned)dev_priv->max_gmr_descriptors);
        }
        if (dev_priv->capabilities & SVGA_CAP_GMR2) {
                DRM_INFO("Max number of GMR pages is %u\n",
                         (unsigned)dev_priv->max_gmr_pages);
                DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
                         (unsigned)dev_priv->memory_size / 1024);
        }
        DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
                 dev_priv->vram_start, dev_priv->vram_size / 1024);
        DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

        ret = vmw_ttm_global_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_err0;

        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 dev_priv->bo_global_ref.ref.object,
                                 &vmw_bo_driver, VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
                goto out_err1;
        }

        ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
                             (dev_priv->vram_size >> PAGE_SHIFT));
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing memory manager for VRAM.\n");
                goto out_err2;
        }

        dev_priv->has_gmr = true;
        if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
                           dev_priv->max_gmr_ids) != 0) {
                DRM_INFO("No GMR memory available. "
                         "Graphics memory resources are very limited.\n");
                dev_priv->has_gmr = false;
        }

        dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
                                         dev_priv->mmio_size);

        if (unlikely(dev_priv->mmio_virt == NULL)) {
                ret = -ENOMEM;
                DRM_ERROR("Failed mapping MMIO.\n");
                goto out_err3;
        }

        /* Need mmio memory to check for fifo pitchlock cap. */
        if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
            !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
            !vmw_fifo_have_pitchlock(dev_priv)) {
                ret = -ENOSYS;
                DRM_ERROR("Hardware has no pitchlock\n");
                goto out_err4;
        }

        dev_priv->tdev = ttm_object_device_init
            (dev_priv->mem_global_ref.object, 12);

        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
                ret = -ENOMEM;
                goto out_err4;
        }

        dev->dev_private = dev_priv;

#if 0

        if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
                ret = drm_irq_install(dev);
                if (ret != 0) {
                        DRM_ERROR("Failed installing irq: %d\n", ret);
                        goto out_no_irq;
                }
        }

        dev_priv->fman = vmw_fence_manager_init(dev_priv);
        if (unlikely(dev_priv->fman == NULL))
                goto out_no_fman;

        vmw_kms_save_vga(dev_priv);
#endif

        /* Start kms and overlay systems, needs fifo. */
        ret = vmw_kms_init(dev_priv);
        if (unlikely(ret != 0))
                goto out_no_kms;

        if (dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        goto out_no_fifo;
//              vmw_fb_init(dev_priv);
        }

        return 0;

out_no_fifo:
//   vmw_overlay_close(dev_priv);
//   vmw_kms_close(dev_priv);
out_no_kms:
//   vmw_kms_restore_vga(dev_priv);
//   vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
//   if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
//       drm_irq_uninstall(dev_priv->dev);
out_no_irq:
//   if (dev_priv->stealth)
//       pci_release_region(dev->pdev, 2);
//   else
//       pci_release_regions(dev->pdev);
out_no_device:
//   ttm_object_device_release(&dev_priv->tdev);
out_err4:
//   iounmap(dev_priv->mmio_virt);
out_err3:
//   arch_phys_wc_del(dev_priv->mmio_mtrr);
//   if (dev_priv->has_gmr)
//       (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
//   (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_err2:
//   (void)ttm_bo_device_release(&dev_priv->bdev);
out_err1:
//   vmw_ttm_global_release(dev_priv);
out_err0:
//   for (i = vmw_res_context; i < vmw_res_max; ++i)
//       idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);
        return ret;
}

#if 0
static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        enum vmw_res_type i;

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.res_ht_initialized)
                drm_ht_remove(&dev_priv->ctx.res_ht);
        if (dev_priv->ctx.cmd_bounce)
                vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
                vmw_fb_close(dev_priv);
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, false);
        }
        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        arch_phys_wc_del(dev_priv->mmio_mtrr);
        if (dev_priv->has_gmr)
                (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
        (void)ttm_bo_device_release(&dev_priv->bdev);
        vmw_ttm_global_release(dev_priv);

        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);

        return 0;
}

static void vmw_preclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_private *dev_priv = vmw_priv(dev);

        vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}

static void vmw_postclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp;

        vmw_fp = vmw_fpriv(file_priv);
        ttm_object_file_release(&vmw_fp->tfile);
        if (vmw_fp->locked_master)
                drm_master_put(&vmw_fp->locked_master);
        kfree(vmw_fp);
}
#endif

static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        INIT_LIST_HEAD(&vmw_fp->fence_events);
//   vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
//   if (unlikely(vmw_fp->tfile == NULL))
//       goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;
//   dev_priv->bdev.dev_mapping = dev->dev_mapping;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}

#if 0
static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct drm_file *file_priv = filp->private_data;
        struct drm_device *dev = file_priv->minor->dev;
        unsigned int nr = DRM_IOCTL_NR(cmd);

        /*
         * Do extra checking on driver private ioctls.
         */

        if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
            && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) {
                struct drm_ioctl_desc *ioctl =
                    &vmw_ioctls[nr - DRM_COMMAND_BASE];

                if (unlikely(ioctl->cmd_drv != cmd)) {
                        DRM_ERROR("Invalid command format, ioctl %d\n",
                                  nr - DRM_COMMAND_BASE);
                        return -EINVAL;
                }
        }

        return drm_ioctl(filp, cmd, arg);
}

static int vmw_firstopen(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        dev_priv->is_opened = true;

        return 0;
}

static void vmw_lastclose(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_crtc *crtc;
        struct drm_mode_set set;
        int ret;

        /**
         * Do nothing on the lastclose call from drm_unload.
         */

        if (!dev_priv->is_opened)
                return;

        dev_priv->is_opened = false;
        set.x = 0;
        set.y = 0;
        set.fb = NULL;
        set.mode = NULL;
        set.connectors = NULL;
        set.num_connectors = 0;

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                set.crtc = crtc;
                ret = drm_mode_set_config_internal(&set);
                WARN_ON(ret != 0);
        }

}

static void vmw_master_init(struct vmw_master *vmaster)
{
        ttm_lock_init(&vmaster->lock);
        INIT_LIST_HEAD(&vmaster->fb_surf);
        mutex_init(&vmaster->fb_surf_mutex);
}

static int vmw_master_create(struct drm_device *dev,
                             struct drm_master *master)
{
        struct vmw_master *vmaster;

        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
        if (unlikely(vmaster == NULL))
                return -ENOMEM;

        vmw_master_init(vmaster);
        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
        master->driver_priv = vmaster;

        return 0;
}

static void vmw_master_destroy(struct drm_device *dev,
                               struct drm_master *master)
{
        struct vmw_master *vmaster = vmw_master(master);

        master->driver_priv = NULL;
        kfree(vmaster);
}


static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (!dev_priv->enable_fb) {
                ret = vmw_3d_resource_inc(dev_priv, true);
                if (unlikely(ret != 0))
                        return ret;
                vmw_kms_save_vga(dev_priv);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 0);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        goto out_no_active_lock;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Unable to clean VRAM on "
                                  "master drop.\n");
                }

                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;

        return 0;

out_no_active_lock:
        if (!dev_priv->enable_fb) {
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
        }
        return ret;
}

static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv,
                            bool from_release)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /**
         * Make sure the master doesn't disappear while we have
         * it locked.
         */

        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_execbuf_release_pinned_bo(dev_priv);

        if (unlikely((ret != 0))) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);

        if (!dev_priv->enable_fb) {
                ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
                if (unlikely(ret != 0))
                        DRM_ERROR("Unable to clean VRAM on master drop.\n");
                vmw_kms_restore_vga(dev_priv);
                vmw_3d_resource_dec(dev_priv, true);
                mutex_lock(&dev_priv->hw_mutex);
                vmw_write(dev_priv, SVGA_REG_TRACES, 1);
                mutex_unlock(&dev_priv->hw_mutex);
        }

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}

#endif


static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
        DRIVER_MODESET,
//   .load = vmw_driver_load,
//   .unload = vmw_driver_unload,
//   .firstopen = vmw_firstopen,
//   .lastclose = vmw_lastclose,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall,
//   .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
//   .get_vblank_counter = vmw_get_vblank_counter,
//   .enable_vblank = vmw_enable_vblank,
//   .disable_vblank = vmw_disable_vblank,
//   .ioctls = vmw_ioctls,
//   .num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
//   .dma_quiescent = NULL,  /*vmw_dma_quiescent, */
//   .master_create = vmw_master_create,
//   .master_destroy = vmw_master_destroy,
//   .master_set = vmw_master_set,
//   .master_drop = vmw_master_drop,
        .open = vmw_driver_open,
//   .preclose = vmw_preclose,
//   .postclose = vmw_postclose,

//   .dumb_create = vmw_dumb_create,
//   .dumb_map_offset = vmw_dumb_map_offset,
//   .dumb_destroy = vmw_dumb_destroy,

//   .fops = &vmwgfx_driver_fops,
//   .name = VMWGFX_DRIVER_NAME,
//   .desc = VMWGFX_DRIVER_DESC,
//   .date = VMWGFX_DRIVER_DATE,
//   .major = VMWGFX_DRIVER_MAJOR,
//   .minor = VMWGFX_DRIVER_MINOR,
//   .patchlevel = VMWGFX_DRIVER_PATCHLEVEL
};

int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    static struct drm_device drm_dev;
    static struct drm_file   drm_file;

    struct drm_device *dev;
    struct drm_file   *priv;

    int ret;

    dev  = &drm_dev;
    priv = &drm_file;

    drm_file_handlers[0] = priv;

//    ret = pci_enable_device(pdev);
//    if (ret)
//        goto err_g1;

    pci_set_master(pdev);

//    if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
//        printk(KERN_ERR "DRM: Fill_in_dev failed.\n");
//        goto err_g2;
//    }

    dev->pdev = pdev;
    dev->pci_device = pdev->device;
    dev->pci_vendor = pdev->vendor;

    INIT_LIST_HEAD(&dev->filelist);
    INIT_LIST_HEAD(&dev->ctxlist);
    INIT_LIST_HEAD(&dev->vmalist);
    INIT_LIST_HEAD(&dev->maplist);

    spin_lock_init(&dev->count_lock);
    mutex_init(&dev->struct_mutex);
    mutex_init(&dev->ctxlist_mutex);

    INIT_LIST_HEAD(&priv->lhead);
    INIT_LIST_HEAD(&priv->fbs);
    INIT_LIST_HEAD(&priv->event_list);
    init_waitqueue_head(&priv->event_wait);
    priv->event_space = 4096; /* set aside 4k for event buffer */

    idr_init(&priv->object_idr);
    spin_lock_init(&priv->table_lock);

    dev->driver = &driver;

    if (dev->driver->open) {
        ret = dev->driver->open(dev, priv);
        if (ret < 0)
            goto err_g4;
    }

    ret = vmw_driver_load(dev, ent->driver_data);

    if (ret)
        goto err_g4;

//    ret = init_display_kms(dev);

    if (ret)
        goto err_g4;

    return 0;

err_g4:
//err_g3:
//    if (drm_core_check_feature(dev, DRIVER_MODESET))
//        drm_put_minor(&dev->control);
//err_g2:
//    pci_disable_device(pdev);
//err_g1:

    return ret;
}

int vmw_init(void)
{
    static pci_dev_t device;
    const struct pci_device_id *ent;
    int err;

    ent = find_pci_device(&device, vmw_pci_id_list);
    if (unlikely(ent == NULL)) {
        dbgprintf("device not found\n");
        return -ENODEV;
    }

    DRM_INFO("device %x:%x\n", device.pci_dev.vendor,
                               device.pci_dev.device);

    drm_global_init();

    err = drm_get_dev(&device.pci_dev, ent);

    return err;
}
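
/*
 * Bring-up order (summary comment added for clarity): vmw_init() locates the
 * SVGA II PCI device (0x15ad:0x0405 from vmw_pci_id_list), initializes the
 * DRM globals, and calls drm_get_dev(), which sets up a static drm_device /
 * drm_file pair and then hands control to vmw_driver_load(). How vmw_init()
 * itself is invoked by the KolibriOS driver entry point is outside this file
 * and assumed here.
 */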


//module_init(vmwgfx_init);
//module_exit(vmwgfx_exit);

MODULE_AUTHOR("VMware Inc. and others");
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
MODULE_LICENSE("GPL and additional rights");