/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#ifndef __VMWGFX_DRM_H__
#define __VMWGFX_DRM_H__

#ifndef __KERNEL__
#include <drm/drm.h>
#endif

#define DRM_VMW_MAX_SURFACE_FACES 6
#define DRM_VMW_MAX_MIP_LEVELS 24


#define DRM_VMW_GET_PARAM            0
#define DRM_VMW_ALLOC_DMABUF         1
#define DRM_VMW_UNREF_DMABUF         2
#define DRM_VMW_CURSOR_BYPASS        3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0 */
#define DRM_VMW_CONTROL_STREAM       4
#define DRM_VMW_CLAIM_STREAM         5
#define DRM_VMW_UNREF_STREAM         6
/* guarded by DRM_VMW_PARAM_3D == 1 */
#define DRM_VMW_CREATE_CONTEXT       7
#define DRM_VMW_UNREF_CONTEXT        8
#define DRM_VMW_CREATE_SURFACE       9
#define DRM_VMW_UNREF_SURFACE        10
#define DRM_VMW_REF_SURFACE          11
#define DRM_VMW_EXECBUF              12
#define DRM_VMW_GET_3D_CAP           13
#define DRM_VMW_FENCE_WAIT           14
#define DRM_VMW_FENCE_SIGNALED       15
#define DRM_VMW_FENCE_UNREF          16
#define DRM_VMW_FENCE_EVENT          17
#define DRM_VMW_PRESENT              18
#define DRM_VMW_PRESENT_READBACK     19
#define DRM_VMW_UPDATE_LAYOUT        20
#define DRM_VMW_CREATE_SHADER        21
#define DRM_VMW_UNREF_SHADER         22
#define DRM_VMW_GB_SURFACE_CREATE    23
#define DRM_VMW_GB_SURFACE_REF       24
#define DRM_VMW_SYNCCPU              25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26

/*************************************************************************/
/**
 * DRM_VMW_GET_PARAM - get device information.
 *
 * DRM_VMW_PARAM_FIFO_OFFSET:
 * Offset to use to map the first page of the FIFO read-only.
 * The FIFO is mapped using the mmap() system call on the drm device.
 *
 * DRM_VMW_PARAM_OVERLAY_IOCTL:
 * Whether the driver supports the overlay ioctl.
 */

#define DRM_VMW_PARAM_NUM_STREAMS      0
#define DRM_VMW_PARAM_NUM_FREE_STREAMS 1
#define DRM_VMW_PARAM_3D               2
#define DRM_VMW_PARAM_HW_CAPS          3
#define DRM_VMW_PARAM_FIFO_CAPS        4
#define DRM_VMW_PARAM_MAX_FB_SIZE      5
#define DRM_VMW_PARAM_FIFO_HW_VERSION  6
#define DRM_VMW_PARAM_MAX_SURF_MEMORY  7
#define DRM_VMW_PARAM_3D_CAPS_SIZE     8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY   9
#define DRM_VMW_PARAM_MAX_MOB_SIZE     10
#define DRM_VMW_PARAM_SCREEN_TARGET    11
#define DRM_VMW_PARAM_DX               12

/**
 * enum drm_vmw_handle_type - handle type for ref ioctls
 */
enum drm_vmw_handle_type {
        DRM_VMW_HANDLE_LEGACY = 0,
        DRM_VMW_HANDLE_PRIME = 1
};

/**
 * struct drm_vmw_getparam_arg
 *
 * @value: Returned value. //Out
 * @param: Parameter to query. //In.
 *
 * Argument to the DRM_VMW_GET_PARAM Ioctl.
 */

struct drm_vmw_getparam_arg {
        uint64_t value;
        uint32_t param;
        uint32_t pad64;
};

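/*
 * Example (editor's illustration, not part of the UAPI): querying a device
 * parameter through libdrm's drmCommandWriteRead() wrapper. A minimal
 * sketch assuming an already-open vmwgfx DRM file descriptor; the helper
 * name vmw_has_3d and the error policy are hypothetical.
 *
 *     #include <xf86drm.h>
 *
 *     static int vmw_has_3d(int fd)
 *     {
 *             struct drm_vmw_getparam_arg arg = {
 *                     .param = DRM_VMW_PARAM_3D,
 *             };
 *
 *             if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *                                     &arg, sizeof(arg)) != 0)
 *                     return 0;          // treat failure as "no 3D"
 *
 *             return arg.value == 1;     // 1 advertises 3D support
 *     }
 */
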
/*************************************************************************/
/**
 * DRM_VMW_CREATE_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */

/**
 * struct drm_vmw_context_arg
 *
 * @cid: Device unique context ID.
 *
 * Output argument to the DRM_VMW_CREATE_CONTEXT Ioctl.
 * Input argument to the DRM_VMW_UNREF_CONTEXT Ioctl.
 */

struct drm_vmw_context_arg {
        int32_t cid;
        uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_CONTEXT - Free a host context.
 *
 * Frees a global context id, and queues a destroy host command for the host.
 * Does not wait for host completion. The context ID can be used directly
 * in the command stream and shows up as the same context ID on the host.
 */

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SURFACE - Create a host surface.
 *
 * Allocates a device unique surface id, and queues a create surface command
 * for the host. Does not wait for host completion. The surface ID can be
 * used directly in the command stream and shows up as the same surface
 * ID on the host.
 */

/**
 * struct drm_vmw_surface_create_req
 *
 * @flags: Surface flags as understood by the host.
 * @format: Surface format as understood by the host.
 * @mip_levels: Number of mip levels for each face.
 * An unused face should have 0 encoded.
 * @size_addr: Address of a user-space array of struct drm_vmw_size
 * cast to an uint64_t for 32-64 bit compatibility.
 * The size of the array should equal the total number of mipmap levels.
 * @shareable: Boolean whether other clients (as identified by file descriptors)
 * may reference this surface.
 * @scanout: Boolean whether the surface is intended to be used as a
 * scanout.
 *
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 * Output data from the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_create_req {
        uint32_t flags;
        uint32_t format;
        uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
        uint64_t size_addr;
        int32_t shareable;
        int32_t scanout;
};

/**
 * struct drm_vmw_surface_arg
 *
 * @sid: Surface id of created surface or surface to destroy or reference.
 * @handle_type: Handle type for DRM_VMW_REF_SURFACE Ioctl.
 *
 * Output data from the DRM_VMW_CREATE_SURFACE Ioctl.
 * Input argument to the DRM_VMW_UNREF_SURFACE Ioctl.
 * Input argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

struct drm_vmw_surface_arg {
        int32_t sid;
        enum drm_vmw_handle_type handle_type;
};

/**
 * struct drm_vmw_size
 *
 * @width - mip level width
 * @height - mip level height
 * @depth - mip level depth
 *
 * Description of a mip level.
 * Input data to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

struct drm_vmw_size {
        uint32_t width;
        uint32_t height;
        uint32_t depth;
        uint32_t pad64;
};

/**
 * union drm_vmw_surface_create_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_CREATE_SURFACE Ioctl.
 */

union drm_vmw_surface_create_arg {
        struct drm_vmw_surface_arg rep;
        struct drm_vmw_surface_create_req req;
};

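/*
 * Example (editor's illustration, not part of the UAPI): creating a
 * single-face, single-mip-level surface with drmCommandWriteRead(). A
 * sketch; the SVGA3d format value passed in and the helper name are
 * hypothetical, and errors simply map to a negative return.
 *
 *     #include <string.h>
 *     #include <xf86drm.h>
 *
 *     static int32_t vmw_create_2d_surface(int fd, uint32_t svga3d_format,
 *                                          uint32_t width, uint32_t height)
 *     {
 *             struct drm_vmw_size size = {
 *                     .width = width, .height = height, .depth = 1,
 *             };
 *             union drm_vmw_surface_create_arg arg;
 *
 *             memset(&arg, 0, sizeof(arg));
 *             arg.req.format = svga3d_format;
 *             arg.req.mip_levels[0] = 1;    // one mip level on face 0
 *             // Address of the drm_vmw_size array, cast per the ABI.
 *             arg.req.size_addr = (uint64_t)(unsigned long)&size;
 *
 *             if (drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *                                     &arg, sizeof(arg)) != 0)
 *                     return -1;
 *
 *             return arg.rep.sid;           // device-unique surface id
 *     }
 */
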
/*************************************************************************/
/**
 * DRM_VMW_REF_SURFACE - Reference a host surface.
 *
 * Puts a reference on a host surface with a given sid, as previously
 * returned by the DRM_VMW_CREATE_SURFACE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface ID in the command
 * stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * in the DRM_VMW_CREATE_SURFACE ioctl.
 */

/**
 * union drm_vmw_surface_reference_arg
 *
 * @rep: Output data as described above.
 * @req: Input data as described above.
 *
 * Argument to the DRM_VMW_REF_SURFACE Ioctl.
 */

union drm_vmw_surface_reference_arg {
        struct drm_vmw_surface_create_req rep;
        struct drm_vmw_surface_arg req;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SURFACE - Unreference a host surface.
 *
 * Clear a reference previously put on a host surface.
 * When all references are gone, including the one implicitly placed
 * on creation, a destroy surface command will be queued for the host.
 * Does not wait for completion.
 */

/*************************************************************************/
/**
 * DRM_VMW_EXECBUF
 *
 * Submit a command buffer for execution on the host, and return a
 * fence seqno that, when signaled, indicates that the command buffer has
 * executed.
 */

/**
 * struct drm_vmw_execbuf_arg
 *
 * @commands: User-space address of a command buffer cast to an uint64_t.
 * @command_size: Size in bytes of the command buffer.
 * @throttle_us: Sleep until software is less than @throttle_us
 * microseconds ahead of hardware. The driver may round this value
 * to the nearest kernel tick.
 * @fence_rep: User-space address of a struct drm_vmw_fence_rep cast to an
 * uint64_t.
 * @version: Allows expanding the execbuf ioctl parameters without breaking
 * backwards compatibility, since user-space will always tell the kernel
 * which version it uses.
 * @flags: Execbuf flags. None currently.
 *
 * Argument to the DRM_VMW_EXECBUF Ioctl.
 */

#define DRM_VMW_EXECBUF_VERSION 2

struct drm_vmw_execbuf_arg {
        uint64_t commands;
        uint32_t command_size;
        uint32_t throttle_us;
        uint64_t fence_rep;
        uint32_t version;
        uint32_t flags;
        uint32_t context_handle;
        uint32_t pad64;
};

/**
 * struct drm_vmw_fence_rep
 *
 * @handle: Fence object handle for fence associated with a command submission.
 * @mask: Fence flags relevant for this fence object.
 * @seqno: Fence sequence number in the FIFO. A fence object with a lower
 * seqno will signal the EXEC flag before a fence object with a higher
 * seqno. This can be used by user-space to avoid kernel calls to determine
 * whether a fence has signaled the EXEC flag. Note that @seqno will
 * wrap at 32-bit.
 * @passed_seqno: The highest seqno processed by the hardware
 * so far. This can be used to mark user-space fence objects as signaled, and
 * to determine whether a fence seqno might be stale.
 * @error: This member should've been set to -EFAULT on submission.
 * The following actions should be taken on completion:
 * error == -EFAULT: Fence communication failed. The host is synchronized.
 * Use the last fence id read from the FIFO fence register.
 * error != 0 && error != -EFAULT:
 * Fence submission failed. The host is synchronized. Use the @seqno member.
 * error == 0: All is OK. The host may not be synchronized.
 * Use the @seqno member.
 *
 * Input / Output data to the DRM_VMW_EXECBUF Ioctl.
 */

struct drm_vmw_fence_rep {
        uint32_t handle;
        uint32_t mask;
        uint32_t seqno;
        uint32_t passed_seqno;
        uint32_t pad64;
        int32_t error;
};

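/*
 * Example (editor's illustration, not part of the UAPI): submitting a
 * command buffer with drmCommandWrite() and collecting the fence data
 * through @fence_rep. A sketch; building the SVGA3D command stream itself
 * is out of scope here and the helper name is hypothetical.
 *
 *     #include <string.h>
 *     #include <xf86drm.h>
 *
 *     static int vmw_execbuf(int fd, void *cmds, uint32_t cmd_size,
 *                            struct drm_vmw_fence_rep *fence_rep)
 *     {
 *             struct drm_vmw_execbuf_arg arg;
 *
 *             memset(&arg, 0, sizeof(arg));
 *             arg.commands = (uint64_t)(unsigned long)cmds;
 *             arg.command_size = cmd_size;
 *             arg.throttle_us = 0;    // no throttling
 *             arg.fence_rep = (uint64_t)(unsigned long)fence_rep;
 *             arg.version = 1;        // legacy layout; DX-aware callers use
 *                                     // DRM_VMW_EXECBUF_VERSION and also set
 *                                     // @context_handle
 *
 *             // The fence information is written back through @fence_rep.
 *             return drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *     }
 */
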
/*************************************************************************/
/**
 * DRM_VMW_ALLOC_DMABUF
 *
 * Allocate a DMA buffer that is visible also to the host.
 * NOTE: The buffer is identified by a handle and an offset, which are
 * private to the guest, but usable in the command stream. The guest kernel
 * may translate these and patch up the command stream accordingly. In the
 * future, the offset may be zero at all times, or it may disappear from the
 * interface before it is fixed.
 *
 * The DMA buffer may stay user-space mapped in the guest at all times,
 * and is thus suitable for sub-allocation.
 *
 * DMA buffers are mapped using the mmap() syscall on the drm device.
 */

/**
 * struct drm_vmw_alloc_dmabuf_req
 *
 * @size: Required minimum size of the buffer.
 *
 * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_alloc_dmabuf_req {
        uint32_t size;
        uint32_t pad64;
};

/**
 * struct drm_vmw_dmabuf_rep
 *
 * @map_handle: Offset to use in the mmap() call used to map the buffer.
 * @handle: Handle unique to this buffer. Used for unreferencing.
 * @cur_gmr_id: GMR id to use in the command stream when this buffer is
 * referenced. See note above.
 * @cur_gmr_offset: Offset to use in the command stream when this buffer is
 * referenced. See note above.
 *
 * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

struct drm_vmw_dmabuf_rep {
        uint64_t map_handle;
        uint32_t handle;
        uint32_t cur_gmr_id;
        uint32_t cur_gmr_offset;
        uint32_t pad64;
};

/**
 * union drm_vmw_alloc_dmabuf_arg
 *
 * @req: Input data as described above.
 * @rep: Output data as described above.
 *
 * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
 */

union drm_vmw_alloc_dmabuf_arg {
        struct drm_vmw_alloc_dmabuf_req req;
        struct drm_vmw_dmabuf_rep rep;
};

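/*
 * Example (editor's illustration, not part of the UAPI): allocating a DMA
 * buffer and mapping it into user space. A sketch assuming libdrm's
 * drmCommandWriteRead() plus a plain mmap() on the drm fd with @map_handle
 * as the offset; the helper name and error policy are hypothetical.
 *
 *     #include <string.h>
 *     #include <sys/mman.h>
 *     #include <xf86drm.h>
 *
 *     static void *vmw_dmabuf_alloc_map(int fd, uint32_t size,
 *                                       uint32_t *handle)
 *     {
 *             union drm_vmw_alloc_dmabuf_arg arg;
 *             void *map;
 *
 *             memset(&arg, 0, sizeof(arg));
 *             arg.req.size = size;
 *
 *             if (drmCommandWriteRead(fd, DRM_VMW_ALLOC_DMABUF,
 *                                     &arg, sizeof(arg)) != 0)
 *                     return NULL;
 *
 *             *handle = arg.rep.handle;   // keep for DRM_VMW_UNREF_DMABUF
 *             map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                        fd, arg.rep.map_handle);
 *             return map == MAP_FAILED ? NULL : map;
 *     }
 */
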
/*************************************************************************/
/**
 * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
 */

/**
 * struct drm_vmw_unref_dmabuf_arg
 *
 * @handle: Handle indicating what buffer to free. Obtained from the
 * DRM_VMW_ALLOC_DMABUF Ioctl.
 *
 * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
 */

struct drm_vmw_unref_dmabuf_arg {
        uint32_t handle;
        uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_CONTROL_STREAM - Control overlays, aka streams.
 *
 * This IOCTL controls the overlay units of the svga device.
 * The SVGA overlay units do not work like regular hardware units in
 * that they do not automatically read back the contents of the given dma
 * buffer. Instead they read back only for each call to this ioctl, and
 * at any point between this call being made and a following call that
 * either changes the buffer or disables the stream.
 */

/**
 * struct drm_vmw_rect
 *
 * Defines a rectangle. Used in the overlay ioctl to define
 * source and destination rectangle.
 */

struct drm_vmw_rect {
        int32_t x;
        int32_t y;
        uint32_t w;
        uint32_t h;
};

/**
 * struct drm_vmw_control_stream_arg
 *
 * @stream_id: Stream to control.
 * @enabled: If false, all following arguments are ignored.
 * @handle: Handle to buffer for getting data from.
 * @format: Format of the overlay as understood by the host.
 * @width: Width of the overlay.
 * @height: Height of the overlay.
 * @size: Size of the overlay in bytes.
 * @pitch: Array of pitches, the last two are only used for YUV12 formats.
 * @offset: Offset from start of dma buffer to overlay.
 * @src: Source rect, must be within the defined area above.
 * @dst: Destination rect, x and y may be negative.
 *
 * Argument to the DRM_VMW_CONTROL_STREAM Ioctl.
 */

struct drm_vmw_control_stream_arg {
        uint32_t stream_id;
        uint32_t enabled;

        uint32_t flags;
        uint32_t color_key;

        uint32_t handle;
        uint32_t offset;
        int32_t format;
        uint32_t size;
        uint32_t width;
        uint32_t height;
        uint32_t pitch[3];

        uint32_t pad64;
        struct drm_vmw_rect src;
        struct drm_vmw_rect dst;
};

/*************************************************************************/
/**
 * DRM_VMW_CURSOR_BYPASS - Give extra information about cursor bypass.
 */

#define DRM_VMW_CURSOR_BYPASS_ALL    (1 << 0)
#define DRM_VMW_CURSOR_BYPASS_FLAGS       (1)

/**
 * struct drm_vmw_cursor_bypass_arg
 *
 * @flags: Flags.
 * @crtc_id: Crtc id, only used if DRM_VMW_CURSOR_BYPASS_ALL isn't passed.
 * @xpos: X position of cursor.
 * @ypos: Y position of cursor.
 * @xhot: X hotspot.
 * @yhot: Y hotspot.
 *
 * Argument to the DRM_VMW_CURSOR_BYPASS Ioctl.
 */

struct drm_vmw_cursor_bypass_arg {
        uint32_t flags;
        uint32_t crtc_id;
        int32_t xpos;
        int32_t ypos;
        int32_t xhot;
        int32_t yhot;
};

/*************************************************************************/
/**
 * DRM_VMW_CLAIM_STREAM - Claim a single stream.
 */

/**
 * struct drm_vmw_stream_arg
 *
 * @stream_id: Device unique stream ID.
 *
 * Output argument to the DRM_VMW_CLAIM_STREAM Ioctl.
 * Input argument to the DRM_VMW_UNREF_STREAM Ioctl.
 */

struct drm_vmw_stream_arg {
        uint32_t stream_id;
        uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_STREAM - Unclaim a stream.
 *
 * Return a single stream that was claimed by this process. Also makes
 * sure that the stream has been stopped.
 */

/*************************************************************************/
/**
 * DRM_VMW_GET_3D_CAP
 *
 * Read 3D capabilities from the FIFO.
 */

/**
 * struct drm_vmw_get_3d_cap_arg
 *
 * @buffer: Pointer to a buffer for capability data, cast to an uint64_t.
 * @max_size: Max size to copy.
 *
 * Input argument to the DRM_VMW_GET_3D_CAP ioctl.
 */

struct drm_vmw_get_3d_cap_arg {
        uint64_t buffer;
        uint32_t max_size;
        uint32_t pad64;
};

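/*
 * Example (editor's illustration, not part of the UAPI): reading the 3D
 * capability block. A sketch assuming libdrm; the buffer size is first
 * queried via DRM_VMW_PARAM_3D_CAPS_SIZE and the helper name is
 * hypothetical. The results land in @buffer, so a plain write suffices.
 *
 *     #include <stdlib.h>
 *     #include <string.h>
 *     #include <xf86drm.h>
 *
 *     static void *vmw_read_3d_caps(int fd, size_t *cap_size)
 *     {
 *             struct drm_vmw_getparam_arg gp = {
 *                     .param = DRM_VMW_PARAM_3D_CAPS_SIZE,
 *             };
 *             struct drm_vmw_get_3d_cap_arg cap;
 *             void *buf;
 *
 *             if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM,
 *                                     &gp, sizeof(gp)) != 0 || !gp.value)
 *                     return NULL;
 *
 *             buf = calloc(1, gp.value);
 *             if (!buf)
 *                     return NULL;
 *
 *             memset(&cap, 0, sizeof(cap));
 *             cap.buffer = (uint64_t)(unsigned long)buf;
 *             cap.max_size = (uint32_t)gp.value;
 *
 *             if (drmCommandWrite(fd, DRM_VMW_GET_3D_CAP,
 *                                 &cap, sizeof(cap)) != 0) {
 *                     free(buf);
 *                     return NULL;
 *             }
 *
 *             *cap_size = gp.value;
 *             return buf;
 *     }
 */
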
/*************************************************************************/
/**
 * DRM_VMW_FENCE_WAIT
 *
 * Waits for a fence object to signal. The wait is interruptible, so that
 * signals may be delivered during the wait. The wait may timeout,
 * in which case the call returns -EBUSY. If the wait is restarted,
 * that is, restarted without resetting @cookie_valid to zero,
 * the timeout is computed from the first call.
 *
 * The flags argument to the DRM_VMW_FENCE_WAIT ioctl indicates what to wait
 * on:
 * DRM_VMW_FENCE_FLAG_EXEC: All commands ahead of the fence in the command
 * stream have executed.
 * DRM_VMW_FENCE_FLAG_QUERY: All query results resulting from query finish
 * commands in the buffer given to the EXECBUF ioctl returning the fence
 * object handle are available to user-space.
 *
 * DRM_VMW_WAIT_OPTION_UNREF: If this wait option is given, and the
 * fence wait ioctl returns 0, the fence object has been unreferenced after
 * the wait.
 */

#define DRM_VMW_FENCE_FLAG_EXEC   (1 << 0)
#define DRM_VMW_FENCE_FLAG_QUERY  (1 << 1)

#define DRM_VMW_WAIT_OPTION_UNREF (1 << 0)

/**
 * struct drm_vmw_fence_wait_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @cookie_valid: Must be reset to 0 on first call. Left alone on restart.
 * @kernel_cookie: Set to 0 on first call. Left alone on restart.
 * @timeout_us: Wait timeout in microseconds. 0 for indefinite timeout.
 * @lazy: Set to 1 if timing is not critical. Allow more than a kernel tick
 * before returning.
 * @flags: Fence flags to wait on.
 * @wait_options: Options that control the behaviour of the wait ioctl.
 *
 * Input argument to the DRM_VMW_FENCE_WAIT ioctl.
 */

struct drm_vmw_fence_wait_arg {
        uint32_t handle;
        int32_t  cookie_valid;
        uint64_t kernel_cookie;
        uint64_t timeout_us;
        int32_t lazy;
        int32_t flags;
        int32_t wait_options;
        int32_t pad64;
};

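/*
 * Example (editor's illustration, not part of the UAPI): waiting for a
 * fence to signal execution and dropping the reference in the same call.
 * A sketch assuming libdrm's drmCommandWriteRead(); the ten-second timeout
 * is an arbitrary illustrative choice.
 *
 *     #include <string.h>
 *     #include <xf86drm.h>
 *
 *     static int vmw_fence_wait_unref(int fd, uint32_t handle)
 *     {
 *             struct drm_vmw_fence_wait_arg arg;
 *
 *             memset(&arg, 0, sizeof(arg));
 *             arg.handle = handle;
 *             arg.timeout_us = 10ULL * 1000000;   // 10 s
 *             arg.lazy = 1;                       // timing not critical
 *             arg.flags = DRM_VMW_FENCE_FLAG_EXEC;
 *             arg.wait_options = DRM_VMW_WAIT_OPTION_UNREF;
 *
 *             // @cookie_valid stays 0; left alone if the wait restarts.
 *             return drmCommandWriteRead(fd, DRM_VMW_FENCE_WAIT,
 *                                        &arg, sizeof(arg));
 *     }
 */
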
/*************************************************************************/
/**
 * DRM_VMW_FENCE_SIGNALED
 *
 * Checks if a fence object is signaled.
 */

/**
 * struct drm_vmw_fence_signaled_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 * @flags: Fence object flags input to the DRM_VMW_FENCE_SIGNALED ioctl.
 * @signaled: Out: Whether the fence object is signaled.
 * @signaled_flags: Out: Flags signaled.
 * @passed_seqno: Out: Highest seqno passed so far. Can be used to signal the
 * EXEC flag of user-space fence objects.
 *
 * Input/Output argument to the DRM_VMW_FENCE_SIGNALED and DRM_VMW_FENCE_UNREF
 * ioctls.
 */

struct drm_vmw_fence_signaled_arg {
        uint32_t handle;
        uint32_t flags;
        int32_t signaled;
        uint32_t passed_seqno;
        uint32_t signaled_flags;
        uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_FENCE_UNREF
 *
 * Unreferences a fence object, and causes it to be destroyed if there are no
 * other references to it.
 */

/**
 * struct drm_vmw_fence_arg
 *
 * @handle: Fence object handle as returned by the DRM_VMW_EXECBUF ioctl.
 *
 * Input/Output argument to the DRM_VMW_FENCE_UNREF ioctl.
 */

struct drm_vmw_fence_arg {
        uint32_t handle;
        uint32_t pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_FENCE_EVENT
 *
 * Queues an event on a fence to be delivered on the drm character device
 * when the fence has signaled the DRM_VMW_FENCE_FLAG_EXEC flag.
 * Optionally the approximate time when the fence signaled is
 * given by the event.
 */

/*
 * The event type
 */
#define DRM_VMW_EVENT_FENCE_SIGNALED 0x80000000

struct drm_vmw_event_fence {
        struct drm_event base;
        uint64_t user_data;
        uint32_t tv_sec;
        uint32_t tv_usec;
};

/*
 * Flags that may be given to the command.
 */
/* Request fence signaled time on the event. */
#define DRM_VMW_FE_FLAG_REQ_TIME (1 << 0)

/**
 * struct drm_vmw_fence_event_arg
 *
 * @fence_rep: Pointer to fence_rep structure cast to uint64_t or 0 if
 * the fence is not supposed to be referenced by user-space.
 * @user_data: User-space data to be delivered with the event.
 * @handle: Attach the event to this fence only.
 * @flags: A set of flags as defined above.
 */
struct drm_vmw_fence_event_arg {
        uint64_t fence_rep;
        uint64_t user_data;
        uint32_t handle;
        uint32_t flags;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT
 *
 * Executes an SVGA present on a given fb for a given surface. The surface
 * is placed on the framebuffer. Cliprects are given relative to the given
 * point (the point designated by dest_{x|y}).
 */

/**
 * struct drm_vmw_present_arg
 * @fb_id: framebuffer id to present / read back from.
 * @sid: Surface id to present from.
 * @dest_x: X placement coordinate for surface.
 * @dest_y: Y placement coordinate for surface.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @num_clips: Number of cliprects given relative to the framebuffer origin,
 * in the same coordinate space as the frame buffer.
 * @pad64: Unused 64-bit padding.
 *
 * Input argument to the DRM_VMW_PRESENT ioctl.
 */

struct drm_vmw_present_arg {
        uint32_t fb_id;
        uint32_t sid;
        int32_t dest_x;
        int32_t dest_y;
        uint64_t clips_ptr;
        uint32_t num_clips;
        uint32_t pad64;
};


/*************************************************************************/
/**
 * DRM_VMW_PRESENT_READBACK
 *
 * Executes an SVGA present readback from a given fb to the dma buffer
 * currently bound as the fb. If there is no dma buffer bound to the fb,
 * an error will be returned.
 */

/**
 * struct drm_vmw_present_readback_arg
 * @fb_id: fb_id to present / read back from.
 * @num_clips: Number of cliprects.
 * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
 * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
 * If this member is NULL, then the ioctl should not return a fence.
 */

struct drm_vmw_present_readback_arg {
        uint32_t fb_id;
        uint32_t num_clips;
        uint64_t clips_ptr;
        uint64_t fence_rep;
};

/*************************************************************************/
/**
 * DRM_VMW_UPDATE_LAYOUT - Update layout
 *
 * Updates the preferred modes and connection status for connectors. The
 * command consists of one drm_vmw_update_layout_arg pointing to an array
 * of num_outputs drm_vmw_rect's.
 */

/**
 * struct drm_vmw_update_layout_arg
 *
 * @num_outputs: number of active connectors
 * @rects: pointer to array of drm_vmw_rect cast to an uint64_t
 *
 * Input argument to the DRM_VMW_UPDATE_LAYOUT Ioctl.
 */
struct drm_vmw_update_layout_arg {
        uint32_t num_outputs;
        uint32_t pad64;
        uint64_t rects;
};

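/*
 * Example (editor's illustration, not part of the UAPI): declaring a
 * two-output side-by-side layout. A sketch assuming libdrm's
 * drmCommandWrite(); the geometry is arbitrary, the helper name is
 * hypothetical, and the caller is assumed to hold DRM master.
 *
 *     #include <string.h>
 *     #include <xf86drm.h>
 *
 *     static int vmw_layout_two_outputs(int fd)
 *     {
 *             struct drm_vmw_rect rects[2] = {
 *                     { .x = 0,    .y = 0, .w = 1920, .h = 1080 },
 *                     { .x = 1920, .y = 0, .w = 1920, .h = 1080 },
 *             };
 *             struct drm_vmw_update_layout_arg arg;
 *
 *             memset(&arg, 0, sizeof(arg));
 *             arg.num_outputs = 2;
 *             arg.rects = (uint64_t)(unsigned long)rects;
 *
 *             return drmCommandWrite(fd, DRM_VMW_UPDATE_LAYOUT,
 *                                    &arg, sizeof(arg));
 *     }
 */
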

/*************************************************************************/
/**
 * DRM_VMW_CREATE_SHADER - Create shader
 *
 * Creates a shader and optionally binds it to a dma buffer containing
 * the shader byte-code.
 */

/**
 * enum drm_vmw_shader_type - Shader types
 */
enum drm_vmw_shader_type {
        drm_vmw_shader_type_vs = 0,
        drm_vmw_shader_type_ps,
};


/**
 * struct drm_vmw_shader_create_arg
 *
 * @shader_type: Shader type of the shader to create.
 * @size: Size of the byte-code in bytes.
 * @buffer_handle: Buffer handle identifying the buffer containing the
 * shader byte-code.
 * @shader_handle: On successful completion contains a handle that
 * can be used to subsequently identify the shader.
 * @offset: Offset in bytes into the buffer given by @buffer_handle,
 * where the shader byte-code starts.
 *
 * Input / Output argument to the DRM_VMW_CREATE_SHADER Ioctl.
 */
struct drm_vmw_shader_create_arg {
        enum drm_vmw_shader_type shader_type;
        uint32_t size;
        uint32_t buffer_handle;
        uint32_t shader_handle;
        uint64_t offset;
};

/*************************************************************************/
/**
 * DRM_VMW_UNREF_SHADER - Unreference a shader
 *
 * Drops a user-space reference to a shader, destroying the shader itself
 * once no references remain.
 */

/**
 * struct drm_vmw_shader_arg
 *
 * @handle: Handle identifying the shader to destroy.
 *
 * Input argument to the DRM_VMW_UNREF_SHADER ioctl.
 */
struct drm_vmw_shader_arg {
        uint32_t handle;
        uint32_t pad64;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_CREATE - Create a host guest-backed surface.
 *
 * Allocates a surface handle and queues a create surface command
 * for the host on the first use of the surface. The surface ID can
 * be used as the surface ID in commands referencing the surface.
 */

/**
 * enum drm_vmw_surface_flags
 *
 * @drm_vmw_surface_flag_shareable:     Whether the surface is shareable.
 * @drm_vmw_surface_flag_scanout:       Whether the surface is a scanout
 *                                      surface.
 * @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
 *                                      given.
 */
enum drm_vmw_surface_flags {
        drm_vmw_surface_flag_shareable = (1 << 0),
        drm_vmw_surface_flag_scanout = (1 << 1),
        drm_vmw_surface_flag_create_buffer = (1 << 2)
};

/**
 * struct drm_vmw_gb_surface_create_req
 *
 * @svga3d_flags:      SVGA3d surface flags for the device.
 * @format:            SVGA3d format.
 * @mip_levels:        Number of mip levels for all faces.
 * @drm_surface_flags: Flags as described above.
 * @multisample_count: Future use. Set to 0.
 * @autogen_filter:    Future use. Set to 0.
 * @buffer_handle:     Buffer handle of backup buffer. SVGA3D_INVALID_ID
 *                     if none.
 * @base_size:         Size of the base mip level for all faces.
 * @array_size:        Must be zero for non-DX hardware, and if non-zero
 *                     svga3d_flags must have proper bind flags setup.
 *
 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl.
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
struct drm_vmw_gb_surface_create_req {
        uint32_t svga3d_flags;
        uint32_t format;
        uint32_t mip_levels;
        enum drm_vmw_surface_flags drm_surface_flags;
        uint32_t multisample_count;
        uint32_t autogen_filter;
        uint32_t buffer_handle;
        uint32_t array_size;
        struct drm_vmw_size base_size;
};

/**
 * struct drm_vmw_gb_surface_create_rep
 *
 * @handle:            Surface handle.
 * @backup_size:       Size of backup buffers for this surface.
 * @buffer_handle:     Handle of backup buffer. SVGA3D_INVALID_ID if none.
 * @buffer_size:       Actual size of the buffer identified by
 *                     @buffer_handle.
 * @buffer_map_handle: Offset into device address space for the buffer
 *                     identified by @buffer_handle.
 *
 * Part of output argument for the DRM_VMW_GB_SURFACE_REF ioctl.
 * Output argument for the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
struct drm_vmw_gb_surface_create_rep {
        uint32_t handle;
        uint32_t backup_size;
        uint32_t buffer_handle;
        uint32_t buffer_size;
        uint64_t buffer_map_handle;
};

/**
 * union drm_vmw_gb_surface_create_arg
 *
 * @req: Input argument as described above.
 * @rep: Output argument as described above.
 *
 * Argument to the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */
union drm_vmw_gb_surface_create_arg {
        struct drm_vmw_gb_surface_create_rep rep;
        struct drm_vmw_gb_surface_create_req req;
};

/*************************************************************************/
/**
 * DRM_VMW_GB_SURFACE_REF - Reference a host surface.
 *
 * Puts a reference on a host surface with a given handle, as previously
 * returned by the DRM_VMW_GB_SURFACE_CREATE ioctl.
 * A reference will make sure the surface isn't destroyed while we hold
 * it and will allow the calling client to use the surface handle in
 * the command stream.
 *
 * On successful return, the Ioctl returns the surface information given
 * to and returned from the DRM_VMW_GB_SURFACE_CREATE ioctl.
 */

/**
 * struct drm_vmw_gb_surface_ref_rep
 *
 * @creq: The data used as input when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_req"
 * @crep: Additional data output when the surface was created, as described
 *        above at "struct drm_vmw_gb_surface_create_rep"
 *
 * Output argument to the DRM_VMW_GB_SURFACE_REF ioctl.
 */
struct drm_vmw_gb_surface_ref_rep {
        struct drm_vmw_gb_surface_create_req creq;
        struct drm_vmw_gb_surface_create_rep crep;
};

/**
 * union drm_vmw_gb_surface_reference_arg
 *
 * @req: Input data as described above at "struct drm_vmw_surface_arg"
 * @rep: Output data as described above at "struct drm_vmw_gb_surface_ref_rep"
 *
 * Argument to the DRM_VMW_GB_SURFACE_REF Ioctl.
 */
union drm_vmw_gb_surface_reference_arg {
        struct drm_vmw_gb_surface_ref_rep rep;
        struct drm_vmw_surface_arg req;
};


/*************************************************************************/
/**
 * DRM_VMW_SYNCCPU - Sync a DMA buffer / MOB for CPU access.
 *
 * Idles any previously submitted GPU operations on the buffer and
 * by default blocks command submissions that reference the buffer.
 * If the file descriptor used to grab a blocking CPU sync is closed, the
 * cpu sync is released.
 * The flags argument indicates how the grab / release operation should be
 * performed:
 */

/**
 * enum drm_vmw_synccpu_flags - Synccpu flags:
 *
 * @drm_vmw_synccpu_read: Sync for read. If sync is done for read only, it's a
 * hint to the kernel to allow command submissions that reference the buffer
 * read-only.
 * @drm_vmw_synccpu_write: Sync for write. Block all command submissions
 * referencing this buffer.
 * @drm_vmw_synccpu_dontblock: Don't wait for GPU idle, but rather return
 * -EBUSY should the buffer be busy.
 * @drm_vmw_synccpu_allow_cs: Allow command submission that touches the buffer
 * while the buffer is synced for CPU. This is similar to the GEM bo idle
 * behavior.
 */
enum drm_vmw_synccpu_flags {
        drm_vmw_synccpu_read = (1 << 0),
        drm_vmw_synccpu_write = (1 << 1),
        drm_vmw_synccpu_dontblock = (1 << 2),
        drm_vmw_synccpu_allow_cs = (1 << 3)
};

/**
 * enum drm_vmw_synccpu_op - Synccpu operations:
 *
 * @drm_vmw_synccpu_grab:    Grab the buffer for CPU operations.
 * @drm_vmw_synccpu_release: Release a previous grab.
 */
enum drm_vmw_synccpu_op {
        drm_vmw_synccpu_grab,
        drm_vmw_synccpu_release
};

/**
 * struct drm_vmw_synccpu_arg
 *
 * @op:                      The synccpu operation as described above.
 * @handle:                  Handle identifying the buffer object.
 * @flags:                   Flags as described above.
 */
struct drm_vmw_synccpu_arg {
        enum drm_vmw_synccpu_op op;
        enum drm_vmw_synccpu_flags flags;
        uint32_t handle;
        uint32_t pad64;
};

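/*
 * Example (editor's illustration, not part of the UAPI): bracketing a CPU
 * write to a previously mapped DMA buffer with a synccpu grab/release pair.
 * A sketch assuming libdrm's drmCommandWrite(); the helper name is
 * hypothetical.
 *
 *     #include <string.h>
 *     #include <xf86drm.h>
 *
 *     static int vmw_synccpu(int fd, uint32_t handle,
 *                            enum drm_vmw_synccpu_op op)
 *     {
 *             struct drm_vmw_synccpu_arg arg;
 *
 *             memset(&arg, 0, sizeof(arg));
 *             arg.op = op;
 *             arg.handle = handle;
 *             arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
 *
 *             return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
 *     }
 *
 *     // Usage around a CPU access to the mapping:
 *     //   vmw_synccpu(fd, handle, drm_vmw_synccpu_grab);
 *     //   memcpy(map, data, size);
 *     //   vmw_synccpu(fd, handle, drm_vmw_synccpu_release);
 */
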
/*************************************************************************/
/**
 * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context.
 *
 * Allocates a device unique context id, and queues a create context command
 * for the host. Does not wait for host completion.
 */
enum drm_vmw_extended_context {
        drm_vmw_context_legacy,
        drm_vmw_context_dx
};

/**
 * union drm_vmw_extended_context_arg
 *
 * @req: Context type.
 * @rep: Context identifier.
 *
 * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl.
 */
union drm_vmw_extended_context_arg {
        enum drm_vmw_extended_context req;
        struct drm_vmw_context_arg rep;
};
#endif /* __VMWGFX_DRM_H__ */