Subversion Repositories Kolibri OS

Rev

Rev 6082 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | Download | RSS feed

  1. /*
  2.  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  3.  * All Rights Reserved.
  4.  *
  5.  * Permission is hereby granted, free of charge, to any person obtaining a
  6.  * copy of this software and associated documentation files (the
  7.  * "Software"), to deal in the Software without restriction, including
  8.  * without limitation the rights to use, copy, modify, merge, publish,
  9.  * distribute, sub license, and/or sell copies of the Software, and to
  10.  * permit persons to whom the Software is furnished to do so, subject to
  11.  * the following conditions:
  12.  *
  13.  * The above copyright notice and this permission notice (including the
  14.  * next paragraph) shall be included in all copies or substantial portions
  15.  * of the Software.
  16.  *
  17.  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  18.  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  19.  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
  20.  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
  21.  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
  22.  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  23.  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  24.  *
  25.  */
  26.  
  27. #ifndef _UAPI_I915_DRM_H_
  28. #define _UAPI_I915_DRM_H_
  29.  
  30. #include <drm/drm.h>
  31.  
  32. /* Please note that modifications to all structs defined here are
  33.  * subject to backwards-compatibility constraints.
  34.  */
  35.  
/**
 * DOC: uevents generated by i915 on its device node
 *
 * I915_L3_PARITY_UEVENT - Generated when the driver receives a parity mismatch
 *      event from the gpu l3 cache. Additional information supplied is ROW,
 *      BANK, SUBBANK, SLICE of the affected cacheline. Userspace should keep
 *      track of these events and if a specific cache-line seems to have a
 *      persistent error remap it with the l3 remapping tool supplied in
 *      intel-gpu-tools.  The value supplied with the event is always 1.
 *
 * I915_ERROR_UEVENT - Generated upon error detection, currently only via
 *      hangcheck. The error detection event is a good indicator of when things
 *      began to go badly. The value supplied with the event is a 1 upon error
 *      detection, and a 0 upon reset completion, signifying no more error
 *      exists. NOTE: Disabling hangcheck or reset via module parameter will
 *      cause the related events to not be seen.
 *
 * I915_RESET_UEVENT - Event is generated just before an attempt to reset the
 *      GPU. The value supplied with the event is always 1. NOTE: Disabling
 *      reset via module parameter will cause this event to not be seen.
 */
#define I915_L3_PARITY_UEVENT           "L3_PARITY_ERROR"
#define I915_ERROR_UEVENT               "ERROR"
#define I915_RESET_UEVENT               "RESET"
  60.  
/* Each texture region is a minimum of 16k (1 << I915_LOG_MIN_TEX_REGION_SIZE
 * bytes), and there are at most 255 of them.
 */
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
                                 * of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
  66.  
/* Argument for the legacy DRM_I915_INIT ioctl: sets up (or tears down /
 * resumes) the pre-GEM DMA ring and static framebuffers.
 */
typedef struct _drm_i915_init {
        enum {
                I915_INIT_DMA = 0x01,   /* initialise the DMA engine */
                I915_CLEANUP_DMA = 0x02,        /* tear it down again */
                I915_RESUME_DMA = 0x03  /* resume after suspend */
        } func;
        unsigned int mmio_offset;       /* offset of the MMIO map */
        int sarea_priv_offset;          /* driver-private area inside the SAREA */
        unsigned int ring_start;        /* ring buffer placement and size */
        unsigned int ring_end;
        unsigned int ring_size;
        unsigned int front_offset;      /* static front/back/depth buffer offsets */
        unsigned int back_offset;
        unsigned int depth_offset;
        unsigned int w;                 /* screen width in pixels */
        unsigned int h;                 /* screen height in pixels */
        unsigned int pitch;
        unsigned int pitch_bits;
        unsigned int back_pitch;
        unsigned int depth_pitch;
        unsigned int cpp;               /* bytes ("chars") per pixel */
        unsigned int chipset;
} drm_i915_init_t;
  90.  
/* Layout of the shared memory area (SAREA) mapped by both the kernel and
 * legacy DRI userspace.  Field order and sizes are ABI — do not reorder.
 */
typedef struct _drm_i915_sarea {
        struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
        int last_upload;        /* last time texture was uploaded */
        int last_enqueue;       /* last time a buffer was enqueued */
        int last_dispatch;      /* age of the most recently dispatched buffer */
        int ctxOwner;           /* last context to upload state */
        int texAge;
        int pf_enabled;         /* is pageflipping allowed? */
        int pf_active;
        int pf_current_page;    /* which buffer is being displayed? */
        int perf_boxes;         /* performance boxes to be displayed */
        int width, height;      /* screen size in pixels */

        /* front buffer: handle, offset and size */
        drm_handle_t front_handle;
        int front_offset;
        int front_size;

        /* back buffer */
        drm_handle_t back_handle;
        int back_offset;
        int back_size;

        /* depth buffer */
        drm_handle_t depth_handle;
        int depth_offset;
        int depth_size;

        /* texture memory */
        drm_handle_t tex_handle;
        int tex_offset;
        int tex_size;
        int log_tex_granularity;
        int pitch;
        int rotation;           /* 0, 90, 180 or 270 */
        int rotated_offset;
        int rotated_size;
        int rotated_pitch;
        int virtualX, virtualY;

        unsigned int front_tiled;
        unsigned int back_tiled;
        unsigned int depth_tiled;
        unsigned int rotated_tiled;
        unsigned int rotated2_tiled;

        int pipeA_x;
        int pipeA_y;
        int pipeA_w;
        int pipeA_h;
        int pipeB_x;
        int pipeB_y;
        int pipeB_w;
        int pipeB_h;

        /* fill out some space for old userspace triple buffer */
        drm_handle_t unused_handle;
        __u32 unused1, unused2, unused3;

        /* buffer object handles for static buffers. May change
         * over the lifetime of the client.
         */
        __u32 front_bo_handle;
        __u32 back_bo_handle;
        __u32 unused_bo_handle;
        __u32 depth_bo_handle;

} drm_i915_sarea_t;
  155.  
/* due to userspace building against these headers we need some compat here */
/* Old userspace used planeX_* names for what are pipeX_* fields above. */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h

/* Flags for perf_boxes (bitmask stored in drm_i915_sarea_t.perf_boxes)
 */
#define I915_BOX_RING_EMPTY    0x1
#define I915_BOX_FLIP          0x2
#define I915_BOX_WAIT          0x4
#define I915_BOX_TEXTURE_LOAD  0x8
#define I915_BOX_LOST_CONTEXT  0x10
  173.  
/* I915 specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 *
 * These are the command numbers (added to DRM_COMMAND_BASE below).
 * Values are ABI and must never be renumbered.  Note the gaps at 0x10
 * and 0x12 — presumably retired commands; left unused on purpose.
 */
#define DRM_I915_INIT           0x00
#define DRM_I915_FLUSH          0x01
#define DRM_I915_FLIP           0x02
#define DRM_I915_BATCHBUFFER    0x03
#define DRM_I915_IRQ_EMIT       0x04
#define DRM_I915_IRQ_WAIT       0x05
#define DRM_I915_GETPARAM       0x06
#define DRM_I915_SETPARAM       0x07
#define DRM_I915_ALLOC          0x08
#define DRM_I915_FREE           0x09
#define DRM_I915_INIT_HEAP      0x0a
#define DRM_I915_CMDBUFFER      0x0b
#define DRM_I915_DESTROY_HEAP   0x0c
#define DRM_I915_SET_VBLANK_PIPE        0x0d
#define DRM_I915_GET_VBLANK_PIPE        0x0e
#define DRM_I915_VBLANK_SWAP    0x0f
#define DRM_I915_HWS_ADDR       0x11
#define DRM_I915_GEM_INIT       0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN        0x15
#define DRM_I915_GEM_UNPIN      0x16
#define DRM_I915_GEM_BUSY       0x17
#define DRM_I915_GEM_THROTTLE   0x18
#define DRM_I915_GEM_ENTERVT    0x19
#define DRM_I915_GEM_LEAVEVT    0x1a
#define DRM_I915_GEM_CREATE     0x1b
#define DRM_I915_GEM_PREAD      0x1c
#define DRM_I915_GEM_PWRITE     0x1d
#define DRM_I915_GEM_MMAP       0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH  0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT   0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID  0x25
#define DRM_I915_GEM_MADVISE    0x26
#define DRM_I915_OVERLAY_PUT_IMAGE      0x27
#define DRM_I915_OVERLAY_ATTRS  0x28
#define DRM_I915_GEM_EXECBUFFER2        0x29
#define DRM_I915_GET_SPRITE_COLORKEY    0x2a
#define DRM_I915_SET_SPRITE_COLORKEY    0x2b
#define DRM_I915_GEM_WAIT       0x2c
#define DRM_I915_GEM_CONTEXT_CREATE     0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY    0x2e
#define DRM_I915_GEM_SET_CACHING        0x2f
#define DRM_I915_GEM_GET_CACHING        0x30
#define DRM_I915_REG_READ               0x31
#define DRM_I915_GET_RESET_STATS        0x32
#define DRM_I915_GEM_USERPTR            0x33
  227.  
/* Full ioctl request codes: command number above combined with the
 * direction (IO/IOR/IOW/IOWR) and argument-struct size.  All of this is
 * ABI; the encodings below must match what the kernel driver registered.
 */
#define DRM_IOCTL_I915_INIT             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
#define DRM_IOCTL_I915_FLUSH            DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH)
#define DRM_IOCTL_I915_FLIP             DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLIP)
#define DRM_IOCTL_I915_BATCHBUFFER      DRM_IOW( DRM_COMMAND_BASE + DRM_I915_BATCHBUFFER, drm_i915_batchbuffer_t)
#define DRM_IOCTL_I915_IRQ_EMIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_IRQ_EMIT, drm_i915_irq_emit_t)
#define DRM_IOCTL_I915_IRQ_WAIT         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_IRQ_WAIT, drm_i915_irq_wait_t)
#define DRM_IOCTL_I915_GETPARAM         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GETPARAM, drm_i915_getparam_t)
#define DRM_IOCTL_I915_SETPARAM         DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SETPARAM, drm_i915_setparam_t)
#define DRM_IOCTL_I915_ALLOC            DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_ALLOC, drm_i915_mem_alloc_t)
#define DRM_IOCTL_I915_FREE             DRM_IOW( DRM_COMMAND_BASE + DRM_I915_FREE, drm_i915_mem_free_t)
#define DRM_IOCTL_I915_INIT_HEAP        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT_HEAP, drm_i915_mem_init_heap_t)
#define DRM_IOCTL_I915_CMDBUFFER        DRM_IOW( DRM_COMMAND_BASE + DRM_I915_CMDBUFFER, drm_i915_cmdbuffer_t)
#define DRM_IOCTL_I915_DESTROY_HEAP     DRM_IOW( DRM_COMMAND_BASE + DRM_I915_DESTROY_HEAP, drm_i915_mem_destroy_heap_t)
#define DRM_IOCTL_I915_SET_VBLANK_PIPE  DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_GET_VBLANK_PIPE  DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
#define DRM_IOCTL_I915_VBLANK_SWAP      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
/* NOTE(review): HWS_ADDR is encoded with struct drm_i915_gem_init rather
 * than drm_i915_hws_addr_t.  This matches the upstream kernel header and
 * must be kept as-is for ABI compatibility — do not "fix" it.
 */
#define DRM_IOCTL_I915_HWS_ADDR         DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_INIT         DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER   DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2  DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN          DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
#define DRM_IOCTL_I915_GEM_SET_CACHING          DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_SET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_GET_CACHING          DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_GET_CACHING, struct drm_i915_gem_caching)
#define DRM_IOCTL_I915_GEM_THROTTLE     DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
#define DRM_IOCTL_I915_GEM_ENTERVT      DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT)
#define DRM_IOCTL_I915_GEM_LEAVEVT      DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT)
#define DRM_IOCTL_I915_GEM_CREATE       DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
#define DRM_IOCTL_I915_GEM_PREAD        DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
#define DRM_IOCTL_I915_GEM_PWRITE       DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT     DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN   DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH    DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING   DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
#define DRM_IOCTL_I915_GEM_GET_TILING   DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR  (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_PIPE_FROM_CRTC_ID, struct drm_i915_get_pipe_from_crtc_id)
#define DRM_IOCTL_I915_GEM_MADVISE      DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE        DRM_IOW(DRM_COMMAND_BASE + DRM_I915_OVERLAY_PUT_IMAGE, struct drm_intel_overlay_put_image)
#define DRM_IOCTL_I915_OVERLAY_ATTRS    DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_OVERLAY_ATTRS, struct drm_intel_overlay_attrs)
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
/* NOTE(review): GET_SPRITE_COLORKEY deliberately encodes the SET command
 * number — both colorkey ioctls share one request code upstream.  Kept
 * as-is for ABI compatibility.
 */
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_SET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey)
#define DRM_IOCTL_I915_GEM_WAIT         DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait)
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE       DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create)
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY      DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy)
#define DRM_IOCTL_I915_REG_READ                 DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read)
#define DRM_IOCTL_I915_GET_RESET_STATS          DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats)
#define DRM_IOCTL_I915_GEM_USERPTR                      DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_USERPTR, struct drm_i915_gem_userptr)
  278.  
/* Allow drivers to submit batchbuffers directly to hardware, relying
 * on the security mechanisms provided by hardware.
 */
typedef struct drm_i915_batchbuffer {
        int start;              /* agp offset */
        int used;               /* nr bytes in use */
        int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
        int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
        int num_cliprects;      /* multipass with multiple cliprects? */
        struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
  290.  
/* As above, but pass a pointer to userspace buffer which can be
 * validated by the kernel prior to sending to hardware.
 */
typedef struct _drm_i915_cmdbuffer {
        char __user *buf;       /* pointer to userspace command buffer */
        int sz;                 /* nr bytes in buf */
        int DR1;                /* hw flags for GFX_OP_DRAWRECT_INFO */
        int DR4;                /* window origin for GFX_OP_DRAWRECT_INFO */
        int num_cliprects;      /* multipass with multiple cliprects? */
        struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
  302.  
/* Userspace can request & wait on irq's:
 */
typedef struct drm_i915_irq_emit {
        int __user *irq_seq;    /* userspace location receiving the irq sequence number */
} drm_i915_irq_emit_t;

typedef struct drm_i915_irq_wait {
        int irq_seq;            /* sequence number to wait for */
} drm_i915_irq_wait_t;
  312.  
/* Ioctl to query kernel params (DRM_I915_GETPARAM).  The I915_PARAM_*
 * values below select which parameter is read; numbering is ABI.
 */
#define I915_PARAM_IRQ_ACTIVE            1
#define I915_PARAM_ALLOW_BATCHBUFFER     2
#define I915_PARAM_LAST_DISPATCH         3
#define I915_PARAM_CHIPSET_ID            4
#define I915_PARAM_HAS_GEM               5
#define I915_PARAM_NUM_FENCES_AVAIL      6
#define I915_PARAM_HAS_OVERLAY           7
#define I915_PARAM_HAS_PAGEFLIPPING      8
#define I915_PARAM_HAS_EXECBUF2          9
#define I915_PARAM_HAS_BSD               10
#define I915_PARAM_HAS_BLT               11
#define I915_PARAM_HAS_RELAXED_FENCING   12
#define I915_PARAM_HAS_COHERENT_RINGS    13
#define I915_PARAM_HAS_EXEC_CONSTANTS    14
#define I915_PARAM_HAS_RELAXED_DELTA     15
#define I915_PARAM_HAS_GEN7_SOL_RESET    16
#define I915_PARAM_HAS_LLC               17
#define I915_PARAM_HAS_ALIASING_PPGTT    18
#define I915_PARAM_HAS_WAIT_TIMEOUT      19
#define I915_PARAM_HAS_SEMAPHORES        20
#define I915_PARAM_HAS_PRIME_VMAP_FLUSH  21
#define I915_PARAM_HAS_VEBOX             22
#define I915_PARAM_HAS_SECURE_BATCHES    23
#define I915_PARAM_HAS_PINNED_BATCHES    24
#define I915_PARAM_HAS_EXEC_NO_RELOC     25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT   26
#define I915_PARAM_HAS_WT                27
#define I915_PARAM_CMD_PARSER_VERSION    28
#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29

typedef struct drm_i915_getparam {
        int param;              /* in: one of the I915_PARAM_* values above */
        int __user *value;      /* out: userspace location receiving the value */
} drm_i915_getparam_t;
  349.  
/* Ioctl to set kernel params (DRM_I915_SETPARAM):
 */
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START            1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY             2
#define I915_SETPARAM_ALLOW_BATCHBUFFER                   3
#define I915_SETPARAM_NUM_USED_FENCES                     4

typedef struct drm_i915_setparam {
        int param;              /* one of the I915_SETPARAM_* values above */
        int value;              /* new value for that parameter */
} drm_i915_setparam_t;
  361.  
/* A memory manager for regions of shared memory:
 */
#define I915_MEM_REGION_AGP 1

typedef struct drm_i915_mem_alloc {
        int region;             /* which region (I915_MEM_REGION_*) */
        int alignment;
        int size;
        int __user *region_offset;      /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;
  372.  
/* Release an allocation previously made with DRM_I915_ALLOC. */
typedef struct drm_i915_mem_free {
        int region;
        int region_offset;
} drm_i915_mem_free_t;
  377.  
/* Initialise a heap the memory manager can allocate from. */
typedef struct drm_i915_mem_init_heap {
        int region;
        int size;
        int start;
} drm_i915_mem_init_heap_t;
  383.  
/* Allow memory manager to be torn down and re-initialized (eg on
 * rotate):
 */
typedef struct drm_i915_mem_destroy_heap {
        int region;
} drm_i915_mem_destroy_heap_t;
  390.  
/* Allow X server to configure which pipes to monitor for vblank signals
 */
#define DRM_I915_VBLANK_PIPE_A  1
#define DRM_I915_VBLANK_PIPE_B  2

typedef struct drm_i915_vblank_pipe {
        int pipe;               /* bitmask of DRM_I915_VBLANK_PIPE_* */
} drm_i915_vblank_pipe_t;
  399.  
/* Schedule buffer swap at given vertical blank:
 */
typedef struct drm_i915_vblank_swap {
        drm_drawable_t drawable;        /* drawable to swap */
        enum drm_vblank_seq_type seqtype;       /* absolute vs. relative sequence */
        unsigned int sequence;          /* vblank sequence to swap at */
} drm_i915_vblank_swap_t;
  407.  
/* Address of the hardware status page (DRM_I915_HWS_ADDR). */
typedef struct drm_i915_hws_addr {
        __u64 addr;
} drm_i915_hws_addr_t;
  411.  
/* Argument for DRM_IOCTL_I915_GEM_INIT: GTT range handed to the DRM
 * memory manager.
 */
struct drm_i915_gem_init {
        /**
         * Beginning offset in the GTT to be managed by the DRM memory
         * manager.
         */
        __u64 gtt_start;
        /**
         * Ending offset in the GTT to be managed by the DRM memory
         * manager.
         */
        __u64 gtt_end;
};
  424.  
/* Argument for DRM_IOCTL_I915_GEM_CREATE: allocate a new GEM object. */
struct drm_i915_gem_create {
        /**
         * Requested size for the object.
         *
         * The (page-aligned) allocated size for the object will be returned.
         */
        __u64 size;
        /**
         * Returned handle for the object.
         *
         * Object handles are nonzero.
         */
        __u32 handle;
        __u32 pad;      /* explicit padding so the struct has the same layout on 32/64-bit */
};
  440.  
/* Argument for DRM_IOCTL_I915_GEM_PREAD: copy data out of a GEM object. */
struct drm_i915_gem_pread {
        /** Handle for the object being read. */
        __u32 handle;
        __u32 pad;
        /** Offset into the object to read from */
        __u64 offset;
        /** Length of data to read */
        __u64 size;
        /**
         * Pointer to write the data into.
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 data_ptr;
};
  456.  
/* Argument for DRM_IOCTL_I915_GEM_PWRITE: copy data into a GEM object. */
struct drm_i915_gem_pwrite {
        /** Handle for the object being written to. */
        __u32 handle;
        __u32 pad;
        /** Offset into the object to write to */
        __u64 offset;
        /** Length of data to write */
        __u64 size;
        /**
         * Pointer to read the data from.
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 data_ptr;
};
  472.  
/* Argument for DRM_IOCTL_I915_GEM_MMAP: map a GEM object into the
 * process address space.
 */
struct drm_i915_gem_mmap {
        /** Handle for the object being mapped. */
        __u32 handle;
        __u32 pad;
        /** Offset in the object to map. */
        __u64 offset;
        /**
         * Length of data to map.
         *
         * The value will be page-aligned.
         */
        __u64 size;
        /**
         * Returned pointer the data was mapped at.
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 addr_ptr;
};
  492.  
/* Argument for DRM_IOCTL_I915_GEM_MMAP_GTT: obtain a fake mmap offset
 * for mapping the object through the GTT aperture.
 */
struct drm_i915_gem_mmap_gtt {
        /** Handle for the object being mapped. */
        __u32 handle;
        __u32 pad;
        /**
         * Fake offset to use for subsequent mmap call
         *
         * This is a fixed-size type for 32/64 compatibility.
         */
        __u64 offset;
};
  504.  
/* Argument for DRM_IOCTL_I915_GEM_SET_DOMAIN: move an object into the
 * given read/write memory domains (see I915_GEM_DOMAIN_* below).
 */
struct drm_i915_gem_set_domain {
        /** Handle for the object */
        __u32 handle;

        /** New read domains */
        __u32 read_domains;

        /** New write domain */
        __u32 write_domain;
};
  515.  
/* Argument for DRM_IOCTL_I915_GEM_SW_FINISH: signal that software
 * (CPU) access to the object is done.
 */
struct drm_i915_gem_sw_finish {
        /** Handle for the object */
        __u32 handle;
};
  520.  
/* One relocation to be applied while executing a batchbuffer; an array of
 * these is referenced from drm_i915_gem_exec_object.relocs_ptr.
 */
struct drm_i915_gem_relocation_entry {
        /**
         * Handle of the buffer being pointed to by this relocation entry.
         *
         * It's appealing to make this be an index into the mm_validate_entry
         * list to refer to the buffer, but this allows the driver to create
         * a relocation list for state buffers and not re-write it per
         * exec using the buffer.
         */
        __u32 target_handle;

        /**
         * Value to be added to the offset of the target buffer to make up
         * the relocation entry.
         */
        __u32 delta;

        /** Offset in the buffer the relocation entry will be written into */
        __u64 offset;

        /**
         * Offset value of the target buffer that the relocation entry was last
         * written as.
         *
         * If the buffer has the same offset as last time, we can skip syncing
         * and writing the relocation.  This value is written back out by
         * the execbuffer ioctl when the relocation is written.
         */
        __u64 presumed_offset;

        /**
         * Target memory domains read by this operation.
         */
        __u32 read_domains;

        /**
         * Target memory domains written by this operation.
         *
         * Note that only one domain may be written by the whole
         * execbuffer operation, so that where there are conflicts,
         * the application will get -EINVAL back.
         */
        __u32 write_domain;
};
  565.  
/** @{
 * Intel memory domains
 *
 * Most of these just align with the various caches in
 * the system and are used to flush and invalidate as
 * objects end up cached in different domains.
 *
 * Used as bitmask values in read_domains/write_domain fields.
 */
/** CPU cache */
#define I915_GEM_DOMAIN_CPU             0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER          0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER         0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND         0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION     0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX          0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT             0x00000040
/** @} */
  588.  
/* One entry in the buffer list passed to the original (v1) execbuffer
 * ioctl; see drm_i915_gem_execbuffer.buffers_ptr.
 */
struct drm_i915_gem_exec_object {
        /**
         * User's handle for a buffer to be bound into the GTT for this
         * operation.
         */
        __u32 handle;

        /** Number of relocations to be performed on this buffer */
        __u32 relocation_count;
        /**
         * Pointer to array of struct drm_i915_gem_relocation_entry containing
         * the relocations to be performed in this buffer.
         */
        __u64 relocs_ptr;

        /** Required alignment in graphics aperture */
        __u64 alignment;

        /**
         * Returned value of the updated offset of the object, for future
         * presumed_offset writes.
         */
        __u64 offset;
};
  613.  
/* Argument for the original DRM_IOCTL_I915_GEM_EXECBUFFER ioctl
 * (superseded by execbuffer2 below).
 */
struct drm_i915_gem_execbuffer {
        /**
         * List of buffers to be validated with their relocations to be
         * performed on them.
         *
         * This is a pointer to an array of struct drm_i915_gem_validate_entry.
         *
         * These buffers must be listed in an order such that all relocations
         * a buffer is performing refer to buffers that have already appeared
         * in the validate list.
         */
        __u64 buffers_ptr;
        __u32 buffer_count;

        /** Offset in the batchbuffer to start execution from. */
        __u32 batch_start_offset;
        /** Bytes used in batchbuffer from batch_start_offset */
        __u32 batch_len;
        __u32 DR1;
        __u32 DR4;
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
};
  638.  
/* One entry in the buffer list passed to execbuffer2; same as
 * drm_i915_gem_exec_object plus per-object flags and reserved fields.
 */
struct drm_i915_gem_exec_object2 {
        /**
         * User's handle for a buffer to be bound into the GTT for this
         * operation.
         */
        __u32 handle;

        /** Number of relocations to be performed on this buffer */
        __u32 relocation_count;
        /**
         * Pointer to array of struct drm_i915_gem_relocation_entry containing
         * the relocations to be performed in this buffer.
         */
        __u64 relocs_ptr;

        /** Required alignment in graphics aperture */
        __u64 alignment;

        /**
         * Returned value of the updated offset of the object, for future
         * presumed_offset writes.
         */
        __u64 offset;

/* Per-object flags; bits above the last define are rejected. */
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT   (1<<1)
#define EXEC_OBJECT_WRITE       (1<<2)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
        __u64 flags;

        __u64 rsvd1;
        __u64 rsvd2;
};
  672.  
/* Argument for DRM_IOCTL_I915_GEM_EXECBUFFER2: submit a batchbuffer
 * together with its buffer/relocation lists.
 */
struct drm_i915_gem_execbuffer2 {
        /**
         * List of gem_exec_object2 structs
         */
        __u64 buffers_ptr;
        __u32 buffer_count;

        /** Offset in the batchbuffer to start execution from. */
        __u32 batch_start_offset;
        /** Bytes used in batchbuffer from batch_start_offset */
        __u32 batch_len;
        __u32 DR1;
        __u32 DR4;
        __u32 num_cliprects;
        /** This is a struct drm_clip_rect *cliprects */
        __u64 cliprects_ptr;
/* Ring selection: low three bits of flags. */
#define I915_EXEC_RING_MASK              (7<<0)
#define I915_EXEC_DEFAULT                (0<<0)
#define I915_EXEC_RENDER                 (1<<0)
#define I915_EXEC_BSD                    (2<<0)
#define I915_EXEC_BLT                    (3<<0)
#define I915_EXEC_VEBOX                  (4<<0)

/* Used for switching the constants addressing mode on gen4+ RENDER ring.
 * Gen6+ only supports relative addressing to dynamic state (default) and
 * absolute addressing.
 *
 * These flags are ignored for the BSD and BLT rings.
 */
#define I915_EXEC_CONSTANTS_MASK        (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE    (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
        __u64 flags;
        __u64 rsvd1; /* now used for context info */
        __u64 rsvd2;
};
  710.  
  711. /** Resets the SO write offset registers for transform feedback on gen7. */
  712. #define I915_EXEC_GEN7_SOL_RESET        (1<<8)
  713.  
  714. /** Request a privileged ("secure") batch buffer. Note only available for
  715.  * DRM_ROOT_ONLY | DRM_MASTER processes.
  716.  */
  717. #define I915_EXEC_SECURE                (1<<9)
  718.  
  719. /** Inform the kernel that the batch is and will always be pinned. This
  720.  * negates the requirement for a workaround to be performed to avoid
  721.  * an incoherent CS (such as can be found on 830/845). If this flag is
  722.  * not passed, the kernel will endeavour to make sure the batch is
  723.  * coherent with the CS before execution. If this flag is passed,
  724.  * userspace assumes the responsibility for ensuring the same.
  725.  */
  726. #define I915_EXEC_IS_PINNED             (1<<10)
  727.  
  728. /** Provide a hint to the kernel that the command stream and auxiliary
  729.  * state buffers already holds the correct presumed addresses and so the
  730.  * relocation process may be skipped if no buffers need to be moved in
  731.  * preparation for the execbuffer.
  732.  */
  733. #define I915_EXEC_NO_RELOC              (1<<11)
  734.  
  735. /** Use the reloc.handle as an index into the exec object array rather
  736.  * than as the per-file handle.
  737.  */
  738. #define I915_EXEC_HANDLE_LUT            (1<<12)
  739.  
  740. #define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
  741.  
  742. #define I915_EXEC_CONTEXT_ID_MASK       (0xffffffff)
  743. #define i915_execbuffer2_set_context_id(eb2, context) \
  744.         (eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
  745. #define i915_execbuffer2_get_context_id(eb2) \
  746.         ((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
  747.  
struct drm_i915_gem_pin {
        /** Handle of the buffer to be pinned. */
        __u32 handle;
        /* Explicit padding keeps the following __u64 naturally aligned */
        __u32 pad;

        /** alignment required within the aperture */
        __u64 alignment;

        /** Returned GTT offset of the buffer. */
        __u64 offset;
};
  759.  
struct drm_i915_gem_unpin {
        /** Handle of the buffer to be unpinned. */
        __u32 handle;
        /* Padding only; must be zero */
        __u32 pad;
};
  765.  
struct drm_i915_gem_busy {
        /** Handle of the buffer to check for busy */
        __u32 handle;

        /** Return busy status (1 if busy, 0 if idle).
         * The high word is used to indicate on which rings the object
         * currently resides:
         *  16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
         * (out parameter, written by the kernel)
         */
        __u32 busy;
};
  777.  
/**
 * I915_CACHING_NONE
 *
 * GPU access is not coherent with cpu caches. Default for machines without an
 * LLC.
 */
#define I915_CACHING_NONE               0
/**
 * I915_CACHING_CACHED
 *
 * GPU access is coherent with cpu caches and furthermore the data is cached in
 * last-level caches shared between cpu cores and the gpu GT. Default on
 * machines with HAS_LLC.
 */
#define I915_CACHING_CACHED             1
/**
 * I915_CACHING_DISPLAY
 *
 * Special GPU caching mode which is coherent with the scanout engines.
 * Transparently falls back to I915_CACHING_NONE on platforms where no special
 * cache mode (like write-through or gfdt flushing) is available. The kernel
 * automatically sets this mode when using a buffer as a scanout target.
 * Userspace can manually set this mode to avoid a costly stall and clflush in
 * the hotpath of drawing the first frame.
 */
#define I915_CACHING_DISPLAY            2

struct drm_i915_gem_caching {
        /**
         * Handle of the buffer to set/get the caching level of. */
        __u32 handle;

        /**
         * Caching level to apply or return value
         *
         * bits0-15 are for generic caching control (i.e. the above defined
         * values). bits16-31 are reserved for platform-specific variations
         * (e.g. l3$ caching on gen7). */
        __u32 caching;
};
  818.  
/* Tiling layouts for GEM objects */
#define I915_TILING_NONE        0
#define I915_TILING_X           1
#define I915_TILING_Y           2

/* Address bit-6 swizzling modes reported for CPU access to tiled buffers */
#define I915_BIT_6_SWIZZLE_NONE         0
#define I915_BIT_6_SWIZZLE_9            1
#define I915_BIT_6_SWIZZLE_9_10         2
#define I915_BIT_6_SWIZZLE_9_11         3
#define I915_BIT_6_SWIZZLE_9_10_11      4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN      5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17         6
#define I915_BIT_6_SWIZZLE_9_10_17      7
  833.  
struct drm_i915_gem_set_tiling {
        /** Handle of the buffer to have its tiling state updated */
        __u32 handle;

        /**
         * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
         * I915_TILING_Y).
         *
         * This value is to be set on request, and will be updated by the
         * kernel on successful return with the actual chosen tiling layout.
         *
         * The tiling mode may be demoted to I915_TILING_NONE when the system
         * has bit 6 swizzling that can't be managed correctly by GEM.
         *
         * Buffer contents become undefined when changing tiling_mode.
         */
        __u32 tiling_mode;

        /**
         * Stride in bytes for the object when in I915_TILING_X or
         * I915_TILING_Y.
         */
        __u32 stride;

        /**
         * Returned address bit 6 swizzling required for CPU access through
         * mmap mapping.  One of the I915_BIT_6_SWIZZLE_* values above.
         */
        __u32 swizzle_mode;
};
  864.  
struct drm_i915_gem_get_tiling {
        /** Handle of the buffer to get tiling state for. */
        __u32 handle;

        /**
         * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
         * I915_TILING_Y).  Out parameter.
         */
        __u32 tiling_mode;

        /**
         * Returned address bit 6 swizzling required for CPU access through
         * mmap mapping.
         */
        __u32 swizzle_mode;

        /**
         * Returned address bit 6 swizzling required for CPU access through
         * mmap mapping whilst bound.
         */
        __u32 phys_swizzle_mode;
};
  887.  
struct drm_i915_gem_get_aperture {
        /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
        __u64 aper_size;

        /**
         * Available space in the aperture used by i915_gem_execbuffer, in
         * bytes
         */
        __u64 aper_available_size;
};
  898.  
struct drm_i915_get_pipe_from_crtc_id {
        /** ID of CRTC being requested (in) */
        __u32 crtc_id;

        /** pipe of requested CRTC (out, filled by the kernel) */
        __u32 pipe;
};
  906.  
/* Backing-store advice values for drm_i915_gem_madvise.madv */
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */

struct drm_i915_gem_madvise {
        /** Handle of the buffer to change the backing store advice */
        __u32 handle;

        /* Advice: either the buffer will be needed again in the near future,
         *         or won't be and could be discarded under memory pressure.
         */
        __u32 madv;

        /** Whether the backing store still exists. */
        __u32 retained;
};
  923.  
/* flags for drm_intel_overlay_put_image.flags */
#define I915_OVERLAY_TYPE_MASK          0xff
#define I915_OVERLAY_YUV_PLANAR         0x01
#define I915_OVERLAY_YUV_PACKED         0x02
#define I915_OVERLAY_RGB                0x03

#define I915_OVERLAY_DEPTH_MASK         0xff00
#define I915_OVERLAY_RGB24              0x1000
#define I915_OVERLAY_RGB16              0x2000
#define I915_OVERLAY_RGB15              0x3000
#define I915_OVERLAY_YUV422             0x0100
#define I915_OVERLAY_YUV411             0x0200
#define I915_OVERLAY_YUV420             0x0300
#define I915_OVERLAY_YUV410             0x0400

#define I915_OVERLAY_SWAP_MASK          0xff0000
#define I915_OVERLAY_NO_SWAP            0x000000
#define I915_OVERLAY_UV_SWAP            0x010000
#define I915_OVERLAY_Y_SWAP             0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP      0x030000

#define I915_OVERLAY_FLAGS_MASK         0xff000000
#define I915_OVERLAY_ENABLE             0x01000000

struct drm_intel_overlay_put_image {
        /* various flags and src format description */
        __u32 flags;
        /* source picture description */
        __u32 bo_handle;
        /* stride values and offsets are in bytes, buffer relative */
        __u16 stride_Y; /* stride for packed formats */
        __u16 stride_UV;
        __u32 offset_Y; /* offset for packed formats */
        __u32 offset_U;
        __u32 offset_V;
        /* in pixels */
        __u16 src_width;
        __u16 src_height;
        /* to compensate the scaling factors for partially covered surfaces */
        __u16 src_scan_width;
        __u16 src_scan_height;
        /* output crtc description */
        __u32 crtc_id;
        __u16 dst_x;
        __u16 dst_y;
        __u16 dst_width;
        __u16 dst_height;
};
  972.  
/* flags for drm_intel_overlay_attrs.flags */
#define I915_OVERLAY_UPDATE_ATTRS       (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA       (1<<1)
struct drm_intel_overlay_attrs {
        /* I915_OVERLAY_UPDATE_* selection of which fields to apply */
        __u32 flags;
        __u32 color_key;
        __s32 brightness;
        __u32 contrast;
        __u32 saturation;
        /* gamma ramp entries, applied when I915_OVERLAY_UPDATE_GAMMA is set */
        __u32 gamma0;
        __u32 gamma1;
        __u32 gamma2;
        __u32 gamma3;
        __u32 gamma4;
        __u32 gamma5;
};
  989.  
  990. /*
  991.  * Intel sprite handling
  992.  *
  993.  * Color keying works with a min/mask/max tuple.  Both source and destination
  994.  * color keying is allowed.
  995.  *
  996.  * Source keying:
  997.  * Sprite pixels within the min & max values, masked against the color channels
  998.  * specified in the mask field, will be transparent.  All other pixels will
  999.  * be displayed on top of the primary plane.  For RGB surfaces, only the min
  1000.  * and mask fields will be used; ranged compares are not allowed.
  1001.  *
  1002.  * Destination keying:
  1003.  * Primary plane pixels that match the min value, masked against the color
  1004.  * channels specified in the mask field, will be replaced by corresponding
  1005.  * pixels from the sprite plane.
  1006.  *
  1007.  * Note that source & destination keying are exclusive; only one can be
  1008.  * active on a given plane.
  1009.  */
  1010.  
#define I915_SET_COLORKEY_NONE          (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION   (1<<1)
#define I915_SET_COLORKEY_SOURCE        (1<<2)
struct drm_intel_sprite_colorkey {
        /* Plane to set the color key parameters on */
        __u32 plane_id;
        /* min/mask/max tuple; see the sprite-handling comment above for
         * source vs destination keying semantics */
        __u32 min_value;
        __u32 channel_mask;
        __u32 max_value;
        /* One of the I915_SET_COLORKEY_* values (mutually exclusive) */
        __u32 flags;
};
  1021.  
struct drm_i915_gem_wait {
        /** Handle of BO we shall wait on */
        __u32 bo_handle;
        __u32 flags;
        /** Number of nanoseconds to wait, Returns time remaining.
         * NOTE(review): presumably a negative value means wait forever,
         * as in upstream i915 — confirm against the driver.
         */
        __s64 timeout_ns;
};
  1029.  
struct drm_i915_gem_context_create {
        /* output: id of new context */
        __u32 ctx_id;
        /* padding; must be zero */
        __u32 pad;
};
  1035.  
struct drm_i915_gem_context_destroy {
        /* input: id of the context to destroy */
        __u32 ctx_id;
        /* padding; must be zero */
        __u32 pad;
};
  1040.  
struct drm_i915_reg_read {
        /* Register offset to read */
        __u64 offset;
        __u64 val; /* Return value */
};
  1045.  
struct drm_i915_reset_stats {
        /* Context to query reset statistics for */
        __u32 ctx_id;
        __u32 flags;

        /* All resets since boot/module reload, for all contexts */
        __u32 reset_count;

        /* Number of batches lost when active in GPU, for this context */
        __u32 batch_active;

        /* Number of batches lost pending for execution, for this context */
        __u32 batch_pending;

        /* padding; must be zero */
        __u32 pad;
};
  1061.  
struct drm_i915_gem_userptr {
        /* Userspace address of the memory to wrap in a GEM object */
        __u64 user_ptr;
        /* Size in bytes of the user allocation */
        __u64 user_size;
        __u32 flags;
#define I915_USERPTR_READ_ONLY 0x1
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000
        /**
         * Returned handle for the object.
         *
         * Object handles are nonzero.
         */
        __u32 handle;
};
  1075.  
/* KolibriOS-specific extension — not part of the upstream i915 uapi.
 * NOTE(review): appears to describe a mask buffer object and its CPU
 * mapping; field semantics are not shown here — confirm against the
 * KolibriOS i915 driver code that services this ioctl.
 */
struct drm_i915_mask {
    __u32 handle;
    __u32 width;
    __u32 height;
    __u32 bo_size;
    __u32 bo_pitch;
    __u32 bo_map;
};
  1084.  
/* KolibriOS-specific extension — not part of the upstream i915 uapi.
 * NOTE(review): presumably returns framebuffer geometry (pitch in bytes,
 * tiling as an I915_TILING_* value) plus its crtc/pipe — confirm against
 * the KolibriOS driver.
 */
struct drm_i915_fb_info {
        __u32 name;
        __u32 width;
        __u32 height;
        __u32 pitch;
        __u32 tiling;
        __u32 crtc;
        __u32 pipe;
};
  1094.  
/* KolibriOS-specific extension — not part of the upstream i915 uapi.
 * NOTE(review): looks like an incremental update of a drm_i915_mask
 * region at offset (dx, dy) — verify field meanings against the driver.
 */
struct drm_i915_mask_update {
    __u32 handle;
    __u32 dx;
    __u32 dy;
    __u32 width;
    __u32 height;
    __u32 bo_pitch;
    __u32 bo_map;
};
  1104.  
  1105.  
  1106. #endif /* _UAPI_I915_DRM_H_ */
  1107.