Subversion Repositories Kolibri OS

Compare Revisions

Rev 4362 → Rev 4363

/contrib/sdk/sources/libdrm/Makefile
0,0 → 1,57
 
LIBRARY= libdrm
 
CC=gcc
CFLAGS = -U_Win32 -U_WIN32 -U__MINGW32__ -c -O2 -fomit-frame-pointer
 
LD = ld
 
AR= ar
 
STRIP = $(PREFIX)strip
 
LDFLAGS:= -shared -s -nostdlib -T ../newlib/dll.lds --entry _DllStartup --version-script libdrm.ver --image-base=0 --out-implib $(LIBRARY).dll.a
 
INCLUDES= -I. -I./include/drm -I../newlib/include
 
LIBPATH:= -L../../lib
 
LIBS:= -ldll -lc.dll
 
 
DEFINES=
 
 
SOURCES = xf86drm.c \
intel/intel_bufmgr.c \
intel/intel_bufmgr_gem.c
 
OBJECTS = $(patsubst %.c, %.o, $(SOURCES))
 
 
# targets
 
 
all: $(LIBRARY).a $(LIBRARY).dll
 
 
$(LIBRARY).a: $(OBJECTS) Makefile
	ar cvrs $(LIBRARY).a $(OBJECTS)
	mv -f $(LIBRARY).a ../../lib
 
$(LIBRARY).dll: $(OBJECTS) Makefile
	$(LD) $(LDFLAGS) $(LIBPATH) -o $@ $(OBJECTS) $(LIBS)
	$(STRIP) $@
	mv -f $@ ../../bin
	mv -f $(LIBRARY).dll.a ../../lib
 
%.o : %.c Makefile
	$(CC) $(CFLAGS) $(DEFINES) $(INCLUDES) -o $@ $<
 
clean:
	-rm -f *.o
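 
# Usage sketch (assumes the KolibriOS SDK cross toolchain on PATH and the
# ../newlib and ../../lib trees laid out as referenced above):
#   make            # builds libdrm.a and libdrm.dll, moves them into the SDK
#   make clean      # removes the intermediate object files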
 
 
 
 
/contrib/sdk/sources/libdrm/include/drm/drm.h
0,0 → 1,871
/**
* \file drm.h
* Header for the Direct Rendering Manager
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*
* \par Acknowledgments:
* Dec 1999, Richard Henderson <rth@twiddle.net>, move to generic \c cmpxchg.
*/
 
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#ifndef _DRM_H_
#define _DRM_H_
 
#include <stdint.h>
 
typedef int8_t __s8;
typedef uint8_t __u8;
typedef int16_t __s16;
typedef uint16_t __u16;
typedef int32_t __s32;
typedef uint32_t __u32;
typedef int64_t __s64;
typedef uint64_t __u64;
typedef unsigned long drm_handle_t;
 
 
 
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
#define DRM_RAM_PERCENT 10 /**< How much system ram can we lock? */
 
#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
#define _DRM_LOCK_IS_HELD(lock) ((lock) & _DRM_LOCK_HELD)
#define _DRM_LOCK_IS_CONT(lock) ((lock) & _DRM_LOCK_CONT)
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
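 
/*
 * Illustrative decoding (sketch): a lock word of 0x80000007 has
 * _DRM_LOCK_HELD set, _DRM_LOCK_CONT clear, and
 * _DRM_LOCKING_CONTEXT() recovers context 7.
 */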
 
typedef unsigned int drm_context_t;
typedef unsigned int drm_drawable_t;
typedef unsigned int drm_magic_t;
 
/**
* Cliprect.
*
* \warning: If you change this structure, make sure you change
* XF86DRIClipRectRec in the server as well
*
* \note KW: Actually it's illegal to change either for
* backwards-compatibility reasons.
*/
struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
};
 
/**
* Drawable information.
*/
struct drm_drawable_info {
unsigned int num_rects;
struct drm_clip_rect *rects;
};
 
/**
* Texture region.
*/
struct drm_tex_region {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
};
 
/**
* Hardware lock.
*
* The lock structure is a simple cache-line aligned integer. To avoid
* processor bus contention on a multiprocessor system, there should not be any
* other data stored in the same cache line.
*/
struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
};
 
/**
* DRM_IOCTL_VERSION ioctl argument type.
*
* \sa drmGetVersion().
*/
struct drm_version {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
size_t name_len; /**< Length of name buffer */
char *name; /**< Name of driver */
size_t date_len; /**< Length of date buffer */
char *date; /**< User-space buffer to hold date */
size_t desc_len; /**< Length of desc buffer */
char *desc; /**< User-space buffer to hold desc */
};
 
/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
*/
struct drm_unique {
size_t unique_len; /**< Length of unique */
char *unique; /**< Unique name for driver instantiation */
};
 
struct drm_list {
int count; /**< Length of user-space structures */
struct drm_version *version;
};
 
struct drm_block {
int unused;
};
 
/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
*/
struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
DRM_INST_HANDLER,
DRM_UNINST_HANDLER
} func;
int irq;
};
 
/**
* Type of memory to map.
*/
enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
_DRM_REGISTERS = 1, /**< no caching, no core dump */
_DRM_SHM = 2, /**< shared, cached */
_DRM_AGP = 3, /**< AGP/GART */
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
_DRM_GEM = 6 /**< GEM object */
};
 
/**
* Memory mapping flags.
*/
enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /**< shared, cached, locked */
_DRM_KERNEL = 0x08, /**< kernel requires access */
_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40, /**< Removable mapping */
_DRM_DRIVER = 0x80 /**< Managed by driver */
};
 
struct drm_ctx_priv_map {
unsigned int ctx_id; /**< Context requesting private mapping */
void *handle; /**< Handle of map */
};
 
/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
* argument type.
*
* \sa drmAddMap().
*/
struct drm_map {
unsigned long offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
enum drm_map_type type; /**< Type of memory to map */
enum drm_map_flags flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
/* Private data */
};
 
/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
struct drm_client {
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
unsigned long pid; /**< Process ID */
unsigned long uid; /**< User ID */
unsigned long magic; /**< Magic */
unsigned long iocs; /**< Ioctl count */
};
 
enum drm_stat_type {
_DRM_STAT_LOCK,
_DRM_STAT_OPENS,
_DRM_STAT_CLOSES,
_DRM_STAT_IOCTLS,
_DRM_STAT_LOCKS,
_DRM_STAT_UNLOCKS,
_DRM_STAT_VALUE, /**< Generic value */
_DRM_STAT_BYTE, /**< Generic byte counter (1024bytes/K) */
_DRM_STAT_COUNT, /**< Generic non-byte counter (1000/k) */
 
_DRM_STAT_IRQ, /**< IRQ */
_DRM_STAT_PRIMARY, /**< Primary DMA bytes */
_DRM_STAT_SECONDARY, /**< Secondary DMA bytes */
_DRM_STAT_DMA, /**< DMA */
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
_DRM_STAT_MISSED /**< Missed DMA opportunity */
/* Add to the *END* of the list */
};
 
/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
struct drm_stats {
unsigned long count;
struct {
unsigned long value;
enum drm_stat_type type;
} data[15];
};
 
/**
* Hardware locking flags.
*/
enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
_DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
};
 
/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
*/
struct drm_lock {
int context;
enum drm_lock_flags flags;
};
 
/**
* DMA flags
*
* \warning
* These values \e must match xf86drm.h.
*
* \sa drm_dma.
*/
enum drm_dma_flags {
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
*
* \note The buffer may not yet have
* been processed by the hardware --
* getting a hardware lock with the
* hardware quiescent will ensure
* that the buffer has been
* processed.
*/
_DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
_DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
 
/* Flags for DMA buffer request */
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
};
 
/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
*/
struct drm_buf_desc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
enum {
_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
_DRM_SG_BUFFER = 0x04, /**< Scatter/gather memory buffer */
_DRM_FB_BUFFER = 0x08, /**< Buffer is in frame buffer */
_DRM_PCI_BUFFER_RO = 0x10 /**< Map PCI DMA buffer read-only */
} flags;
unsigned long agp_start; /**<
* Start address of where the AGP buffers are
* in the AGP aperture
*/
};
 
/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
struct drm_buf_info {
int count; /**< Entries in list */
struct drm_buf_desc *list;
};
 
/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
struct drm_buf_free {
int count;
int *list;
};
 
/**
* Buffer information
*
* \sa drm_buf_map.
*/
struct drm_buf_pub {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
void *address; /**< Address of buffer */
};
 
/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
struct drm_buf_map {
int count; /**< Length of the buffer list */
#ifdef __cplusplus
void *virt;
#else
void *virtual; /**< Mmap'd area in user-virtual */
#endif
struct drm_buf_pub *list; /**< Buffer information */
};
 
/**
* DRM_IOCTL_DMA ioctl argument type.
*
* Indices here refer to the offset into the buffer list in drm_buf_get.
*
* \sa drmDMA().
*/
struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int *send_indices; /**< List of handles to buffers */
int *send_sizes; /**< Lengths of data to send */
enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int *request_indices; /**< Buffer information */
int *request_sizes;
int granted_count; /**< Number of buffers granted */
};
 
enum drm_ctx_flags {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
};
 
/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
*/
struct drm_ctx {
drm_context_t handle;
enum drm_ctx_flags flags;
};
 
/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
struct drm_ctx_res {
int count;
struct drm_ctx *contexts;
};
 
/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
struct drm_draw {
drm_drawable_t handle;
};
 
/**
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
*/
typedef enum {
DRM_DRAWABLE_CLIPRECTS
} drm_drawable_info_type_t;
 
struct drm_update_draw {
drm_drawable_t handle;
unsigned int type;
unsigned int num;
unsigned long long data;
};
 
/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
struct drm_auth {
drm_magic_t magic;
};
 
/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
*/
struct drm_irq_busid {
int irq; /**< IRQ number */
int busnum; /**< bus number */
int devnum; /**< device number */
int funcnum; /**< function number */
};
 
enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
};
 
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)
 
struct drm_wait_vblank_request {
enum drm_vblank_seq_type type;
unsigned int sequence;
unsigned long signal;
};
 
struct drm_wait_vblank_reply {
enum drm_vblank_seq_type type;
unsigned int sequence;
long tval_sec;
long tval_usec;
};
 
/**
* DRM_IOCTL_WAIT_VBLANK ioctl argument type.
*
* \sa drmWaitVBlank().
*/
union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
};
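 
/*
 * Fill sketch (illustrative): request a wait for the next vblank,
 * relative to now; on completion the reply member of the same union
 * carries the sequence number and timestamp.
 *
 *   union drm_wait_vblank vbl = {0};
 *   vbl.request.type = _DRM_VBLANK_RELATIVE;
 *   vbl.request.sequence = 1;
 *   // submit via drmIoctl() with this port's request code for vblank waits
 */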
 
#define _DRM_PRE_MODESET 1
#define _DRM_POST_MODESET 2
 
/**
* DRM_IOCTL_MODESET_CTL ioctl argument type
*
* \sa drmModesetCtl().
*/
struct drm_modeset_ctl {
__u32 crtc;
__u32 cmd;
};
 
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
*/
struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
};
 
/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
*/
struct drm_agp_buffer {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for binding / unbinding */
unsigned long type; /**< Type of memory to allocate */
unsigned long physical; /**< Physical used by i810 */
};
 
/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
*/
struct drm_agp_binding {
unsigned long handle; /**< From drm_agp_buffer */
unsigned long offset; /**< In bytes -- will round to page boundary */
};
 
/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
*
* \sa drmAgpVersionMajor(), drmAgpVersionMinor(), drmAgpGetMode(),
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
* drmAgpVendorId() and drmAgpDeviceId().
*/
struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
unsigned long aperture_base; /* physical address */
unsigned long aperture_size; /* bytes */
unsigned long memory_allowed; /* bytes */
unsigned long memory_used;
 
/* PCI information */
unsigned short id_vendor;
unsigned short id_device;
};
 
/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
struct drm_scatter_gather {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for mapping / unmapping */
};
 
/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
struct drm_set_version {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
};
 
/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
struct drm_gem_close {
/** Handle of the object to be closed. */
__u32 handle;
__u32 pad;
};
 
/** DRM_IOCTL_GEM_FLINK ioctl argument type */
struct drm_gem_flink {
/** Handle for the object being named */
__u32 handle;
 
/** Returned global name */
__u32 name;
};
 
/** DRM_IOCTL_GEM_OPEN ioctl argument type */
struct drm_gem_open {
/** Name of object being opened */
__u32 name;
 
/** Returned handle for the object */
__u32 handle;
 
/** Returned size of the object */
__u64 size;
};
 
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
__u64 capability;
__u64 value;
};
 
/**
* DRM_CLIENT_CAP_STEREO_3D
*
* If set to 1, the DRM core will expose the stereo 3D capabilities of the
* monitor by advertising the supported 3D layouts in the flags of struct
* drm_mode_modeinfo.
*/
#define DRM_CLIENT_CAP_STEREO_3D 1
 
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
__u64 value;
};
 
#define DRM_CLOEXEC O_CLOEXEC
struct drm_prime_handle {
__u32 handle;
 
/** Flags.. only applicable for handle->fd */
__u32 flags;
 
/** Returned dmabuf file descriptor */
__s32 fd;
};
 
#define SRV_GET_PCI_INFO 20
#define SRV_I915_GET_PARAM 21
#define SRV_I915_GEM_CREATE 22
#define SRV_DRM_GEM_CLOSE 23
#define SRV_DRM_GEM_FLINK 24
#define SRV_DRM_GEM_OPEN 25
#define SRV_I915_GEM_PIN 26
#define SRV_I915_GEM_UNPIN 27
#define SRV_I915_GEM_SET_CACHING 28
#define SRV_I915_GEM_PWRITE 29
#define SRV_I915_GEM_BUSY 30
#define SRV_I915_GEM_SET_DOMAIN 31
#define SRV_I915_GEM_MMAP 32
#define SRV_I915_GEM_SET_TILING 33
#define SRV_I915_GEM_GET_TILING 34
#define SRV_I915_GEM_GET_APERTURE 35
#define SRV_I915_GEM_MMAP_GTT 36
#define SRV_I915_GEM_THROTTLE 37
#define SRV_I915_GEM_EXECBUFFER2 38
#define SRV_I915_GEM_WAIT 39
#define SRV_I915_GEM_CONTEXT_CREATE 40
#define SRV_I915_GEM_CONTEXT_DESTROY 41
#define SRV_I915_REG_READ 42
 
#define SRV_FBINFO 43
#define SRV_MASK_UPDATE 44
 
 
 
#include "drm_mode.h"
 
#define DRM_IOCTL_BASE 'd'
#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
 
#define DRM_IOCTL_VERSION
#define DRM_IOCTL_GET_UNIQUE
#define DRM_IOCTL_GET_MAGIC
#define DRM_IOCTL_IRQ_BUSID
#define DRM_IOCTL_GET_MAP
#define DRM_IOCTL_GET_CLIENT
#define DRM_IOCTL_GET_STATS
#define DRM_IOCTL_SET_VERSION
#define DRM_IOCTL_MODESET_CTL
#define DRM_IOCTL_GEM_CLOSE SRV_DRM_GEM_CLOSE
#define DRM_IOCTL_GEM_FLINK SRV_DRM_GEM_FLINK
#define DRM_IOCTL_GEM_OPEN SRV_DRM_GEM_OPEN
#define DRM_IOCTL_GET_CAP
#define DRM_IOCTL_SET_CLIENT_CAP
 
#define DRM_IOCTL_SET_UNIQUE
#define DRM_IOCTL_AUTH_MAGIC
#define DRM_IOCTL_BLOCK
#define DRM_IOCTL_UNBLOCK
#define DRM_IOCTL_CONTROL
#define DRM_IOCTL_ADD_MAP
#define DRM_IOCTL_ADD_BUFS
#define DRM_IOCTL_MARK_BUFS
#define DRM_IOCTL_INFO_BUFS
#define DRM_IOCTL_MAP_BUFS
#define DRM_IOCTL_FREE_BUFS
 
#define DRM_IOCTL_RM_MAP
 
#define DRM_IOCTL_SET_SAREA_CTX
#define DRM_IOCTL_GET_SAREA_CTX
 
#define DRM_IOCTL_SET_MASTER
#define DRM_IOCTL_DROP_MASTER
 
#define DRM_IOCTL_ADD_CTX
#define DRM_IOCTL_RM_CTX
#define DRM_IOCTL_MOD_CTX
#define DRM_IOCTL_GET_CTX
#define DRM_IOCTL_SWITCH_CTX
#define DRM_IOCTL_NEW_CTX
#define DRM_IOCTL_RES_CTX
#define DRM_IOCTL_ADD_DRAW
#define DRM_IOCTL_RM_DRAW
#define DRM_IOCTL_DMA
#define DRM_IOCTL_LOCK
#define DRM_IOCTL_UNLOCK
#define DRM_IOCTL_FINISH
 
#define DRM_IOCTL_PRIME_HANDLE_TO_FD
#define DRM_IOCTL_PRIME_FD_TO_HANDLE
 
#define DRM_IOCTL_AGP_ACQUIRE
#define DRM_IOCTL_AGP_RELEASE
#define DRM_IOCTL_AGP_ENABLE
#define DRM_IOCTL_AGP_INFO
#define DRM_IOCTL_AGP_ALLOC
#define DRM_IOCTL_AGP_FREE
#define DRM_IOCTL_AGP_BIND
#define DRM_IOCTL_AGP_UNBIND
 
#define DRM_IOCTL_SG_ALLOC
#define DRM_IOCTL_SG_FREE
 
#define DRM_IOCTL_WAIT_VBLANK
 
#define DRM_IOCTL_UPDATE_DRAW
 
#define DRM_IOCTL_MODE_GETRESOURCES
#define DRM_IOCTL_MODE_GETCRTC
#define DRM_IOCTL_MODE_SETCRTC
#define DRM_IOCTL_MODE_CURSOR
#define DRM_IOCTL_MODE_GETGAMMA
#define DRM_IOCTL_MODE_SETGAMMA
#define DRM_IOCTL_MODE_GETENCODER
#define DRM_IOCTL_MODE_GETCONNECTOR
#define DRM_IOCTL_MODE_ATTACHMODE
#define DRM_IOCTL_MODE_DETACHMODE
 
#define DRM_IOCTL_MODE_GETPROPERTY
#define DRM_IOCTL_MODE_SETPROPERTY
#define DRM_IOCTL_MODE_GETPROPBLOB
#define DRM_IOCTL_MODE_GETFB
#define DRM_IOCTL_MODE_ADDFB
#define DRM_IOCTL_MODE_RMFB
#define DRM_IOCTL_MODE_PAGE_FLIP
#define DRM_IOCTL_MODE_DIRTYFB
 
#define DRM_IOCTL_MODE_CREATE_DUMB
#define DRM_IOCTL_MODE_MAP_DUMB
#define DRM_IOCTL_MODE_DESTROY_DUMB
#define DRM_IOCTL_MODE_GETPLANERESOURCES
#define DRM_IOCTL_MODE_GETPLANE
#define DRM_IOCTL_MODE_SETPLANE
#define DRM_IOCTL_MODE_ADDFB2
#define DRM_IOCTL_MODE_OBJ_GETPROPERTIES
#define DRM_IOCTL_MODE_OBJ_SETPROPERTY
#define DRM_IOCTL_MODE_CURSOR2
 
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x99.
* Generic IOCTLS restart at 0xA0.
*
* \sa drmCommandNone(), drmCommandRead(), drmCommandWrite(), and
* drmCommandReadWrite().
*/
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
 
/**
* Header for events written back to userspace on the drm fd. The
* type defines the type of event, the length specifies the total
* length of the event (including the header), and user_data is
* typically a 64 bit value passed with the ioctl that triggered the
* event. A read on the drm fd will always only return complete
* events, that is, if for example the read buffer is 100 bytes, and
* there are two 64 byte events pending, only one will be returned.
*
* Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
* up are chipset specific.
*/
struct drm_event {
__u32 type;
__u32 length;
};
 
#define DRM_EVENT_VBLANK 0x01
#define DRM_EVENT_FLIP_COMPLETE 0x02
 
struct drm_event_vblank {
struct drm_event base;
__u64 user_data;
__u32 tv_sec;
__u32 tv_usec;
__u32 sequence;
__u32 reserved;
};
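 
/*
 * Drain sketch for the stream described above (assumes a readable drm
 * fd; reads return whole events only, so e->length can be trusted to
 * step to the next record):
 *
 *   char buf[1024];
 *   int len = read(fd, buf, sizeof buf);
 *   int i = 0;
 *   while (i < len) {
 *       struct drm_event *e = (struct drm_event *)&buf[i];
 *       if (e->type == DRM_EVENT_VBLANK)
 *           handle_vblank((struct drm_event_vblank *)e);  // hypothetical handler
 *       i += e->length;                    // length includes this header
 *   }
 */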
 
#define DRM_CAP_DUMB_BUFFER 0x1
#define DRM_CAP_VBLANK_HIGH_CRTC 0x2
#define DRM_CAP_DUMB_PREFERRED_DEPTH 0x3
#define DRM_CAP_DUMB_PREFER_SHADOW 0x4
#define DRM_CAP_PRIME 0x5
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
 
#define DRM_PRIME_CAP_IMPORT 0x1
#define DRM_PRIME_CAP_EXPORT 0x2
 
/* typedef area */
typedef struct drm_clip_rect drm_clip_rect_t;
typedef struct drm_drawable_info drm_drawable_info_t;
typedef struct drm_tex_region drm_tex_region_t;
typedef struct drm_hw_lock drm_hw_lock_t;
typedef struct drm_version drm_version_t;
typedef struct drm_unique drm_unique_t;
typedef struct drm_list drm_list_t;
typedef struct drm_block drm_block_t;
typedef struct drm_control drm_control_t;
typedef enum drm_map_type drm_map_type_t;
typedef enum drm_map_flags drm_map_flags_t;
typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
typedef struct drm_map drm_map_t;
typedef struct drm_client drm_client_t;
typedef enum drm_stat_type drm_stat_type_t;
typedef struct drm_stats drm_stats_t;
typedef enum drm_lock_flags drm_lock_flags_t;
typedef struct drm_lock drm_lock_t;
typedef enum drm_dma_flags drm_dma_flags_t;
typedef struct drm_buf_desc drm_buf_desc_t;
typedef struct drm_buf_info drm_buf_info_t;
typedef struct drm_buf_free drm_buf_free_t;
typedef struct drm_buf_pub drm_buf_pub_t;
typedef struct drm_buf_map drm_buf_map_t;
typedef struct drm_dma drm_dma_t;
typedef union drm_wait_vblank drm_wait_vblank_t;
typedef struct drm_agp_mode drm_agp_mode_t;
typedef enum drm_ctx_flags drm_ctx_flags_t;
typedef struct drm_ctx drm_ctx_t;
typedef struct drm_ctx_res drm_ctx_res_t;
typedef struct drm_draw drm_draw_t;
typedef struct drm_update_draw drm_update_draw_t;
typedef struct drm_auth drm_auth_t;
typedef struct drm_irq_busid drm_irq_busid_t;
typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
 
typedef struct drm_agp_buffer drm_agp_buffer_t;
typedef struct drm_agp_binding drm_agp_binding_t;
typedef struct drm_agp_info drm_agp_info_t;
typedef struct drm_scatter_gather drm_scatter_gather_t;
typedef struct drm_set_version drm_set_version_t;
 
int drmIoctl(int fd, unsigned long request, void *arg);
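 
/*
 * Call sketch (illustrative, assumes an open fd and a valid GEM handle):
 * in this port the request codes are the SRV_* values above rather than
 * _IO*-encoded numbers, e.g. closing a GEM handle:
 *
 *   struct drm_gem_close close_args = { .handle = handle };
 *   drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);   // == SRV_DRM_GEM_CLOSE
 */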
 
#endif
/contrib/sdk/sources/libdrm/include/drm/drm_fourcc.h
0,0 → 1,130
/*
* Copyright 2011 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
 
#ifndef DRM_FOURCC_H
#define DRM_FOURCC_H
 
#include <inttypes.h>
 
#define fourcc_code(a,b,c,d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
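 
/*
 * Worked example (illustrative): fourcc_code('X','R','2','4') packs the
 * four ASCII bytes little-endian, so DRM_FORMAT_XRGB8888 below equals
 * 0x58 | 0x52<<8 | 0x32<<16 | 0x34<<24 == 0x34325258.
 */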
 
#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
 
/* color index */
#define DRM_FORMAT_C8 fourcc_code('C', '8', ' ', ' ') /* [7:0] C */
 
/* 8 bpp RGB */
#define DRM_FORMAT_RGB332 fourcc_code('R', 'G', 'B', '8') /* [7:0] R:G:B 3:3:2 */
#define DRM_FORMAT_BGR233 fourcc_code('B', 'G', 'R', '8') /* [7:0] B:G:R 2:3:3 */
 
/* 16 bpp RGB */
#define DRM_FORMAT_XRGB4444 fourcc_code('X', 'R', '1', '2') /* [15:0] x:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_XBGR4444 fourcc_code('X', 'B', '1', '2') /* [15:0] x:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBX4444 fourcc_code('R', 'X', '1', '2') /* [15:0] R:G:B:x 4:4:4:4 little endian */
#define DRM_FORMAT_BGRX4444 fourcc_code('B', 'X', '1', '2') /* [15:0] B:G:R:x 4:4:4:4 little endian */
 
#define DRM_FORMAT_ARGB4444 fourcc_code('A', 'R', '1', '2') /* [15:0] A:R:G:B 4:4:4:4 little endian */
#define DRM_FORMAT_ABGR4444 fourcc_code('A', 'B', '1', '2') /* [15:0] A:B:G:R 4:4:4:4 little endian */
#define DRM_FORMAT_RGBA4444 fourcc_code('R', 'A', '1', '2') /* [15:0] R:G:B:A 4:4:4:4 little endian */
#define DRM_FORMAT_BGRA4444 fourcc_code('B', 'A', '1', '2') /* [15:0] B:G:R:A 4:4:4:4 little endian */
 
#define DRM_FORMAT_XRGB1555 fourcc_code('X', 'R', '1', '5') /* [15:0] x:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_XBGR1555 fourcc_code('X', 'B', '1', '5') /* [15:0] x:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBX5551 fourcc_code('R', 'X', '1', '5') /* [15:0] R:G:B:x 5:5:5:1 little endian */
#define DRM_FORMAT_BGRX5551 fourcc_code('B', 'X', '1', '5') /* [15:0] B:G:R:x 5:5:5:1 little endian */
 
#define DRM_FORMAT_ARGB1555 fourcc_code('A', 'R', '1', '5') /* [15:0] A:R:G:B 1:5:5:5 little endian */
#define DRM_FORMAT_ABGR1555 fourcc_code('A', 'B', '1', '5') /* [15:0] A:B:G:R 1:5:5:5 little endian */
#define DRM_FORMAT_RGBA5551 fourcc_code('R', 'A', '1', '5') /* [15:0] R:G:B:A 5:5:5:1 little endian */
#define DRM_FORMAT_BGRA5551 fourcc_code('B', 'A', '1', '5') /* [15:0] B:G:R:A 5:5:5:1 little endian */
 
#define DRM_FORMAT_RGB565 fourcc_code('R', 'G', '1', '6') /* [15:0] R:G:B 5:6:5 little endian */
#define DRM_FORMAT_BGR565 fourcc_code('B', 'G', '1', '6') /* [15:0] B:G:R 5:6:5 little endian */
 
/* 24 bpp RGB */
#define DRM_FORMAT_RGB888 fourcc_code('R', 'G', '2', '4') /* [23:0] R:G:B little endian */
#define DRM_FORMAT_BGR888 fourcc_code('B', 'G', '2', '4') /* [23:0] B:G:R little endian */
 
/* 32 bpp RGB */
#define DRM_FORMAT_XRGB8888 fourcc_code('X', 'R', '2', '4') /* [31:0] x:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_XBGR8888 fourcc_code('X', 'B', '2', '4') /* [31:0] x:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBX8888 fourcc_code('R', 'X', '2', '4') /* [31:0] R:G:B:x 8:8:8:8 little endian */
#define DRM_FORMAT_BGRX8888 fourcc_code('B', 'X', '2', '4') /* [31:0] B:G:R:x 8:8:8:8 little endian */
 
#define DRM_FORMAT_ARGB8888 fourcc_code('A', 'R', '2', '4') /* [31:0] A:R:G:B 8:8:8:8 little endian */
#define DRM_FORMAT_ABGR8888 fourcc_code('A', 'B', '2', '4') /* [31:0] A:B:G:R 8:8:8:8 little endian */
#define DRM_FORMAT_RGBA8888 fourcc_code('R', 'A', '2', '4') /* [31:0] R:G:B:A 8:8:8:8 little endian */
#define DRM_FORMAT_BGRA8888 fourcc_code('B', 'A', '2', '4') /* [31:0] B:G:R:A 8:8:8:8 little endian */
 
#define DRM_FORMAT_XRGB2101010 fourcc_code('X', 'R', '3', '0') /* [31:0] x:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_XBGR2101010 fourcc_code('X', 'B', '3', '0') /* [31:0] x:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBX1010102 fourcc_code('R', 'X', '3', '0') /* [31:0] R:G:B:x 10:10:10:2 little endian */
#define DRM_FORMAT_BGRX1010102 fourcc_code('B', 'X', '3', '0') /* [31:0] B:G:R:x 10:10:10:2 little endian */
 
#define DRM_FORMAT_ARGB2101010 fourcc_code('A', 'R', '3', '0') /* [31:0] A:R:G:B 2:10:10:10 little endian */
#define DRM_FORMAT_ABGR2101010 fourcc_code('A', 'B', '3', '0') /* [31:0] A:B:G:R 2:10:10:10 little endian */
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
 
/* packed YCbCr */
#define DRM_FORMAT_YUYV fourcc_code('Y', 'U', 'Y', 'V') /* [31:0] Cr0:Y1:Cb0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_YVYU fourcc_code('Y', 'V', 'Y', 'U') /* [31:0] Cb0:Y1:Cr0:Y0 8:8:8:8 little endian */
#define DRM_FORMAT_UYVY fourcc_code('U', 'Y', 'V', 'Y') /* [31:0] Y1:Cr0:Y0:Cb0 8:8:8:8 little endian */
#define DRM_FORMAT_VYUY fourcc_code('V', 'Y', 'U', 'Y') /* [31:0] Y1:Cb0:Y0:Cr0 8:8:8:8 little endian */
 
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
 
/*
* 2 plane YCbCr
* index 0 = Y plane, [7:0] Y
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
* or
* index 1 = Cb:Cr plane, [15:0] Cb:Cr little endian
*/
#define DRM_FORMAT_NV12 fourcc_code('N', 'V', '1', '2') /* 2x2 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */
 
/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
* index 1: Cb plane, [7:0] Cb
* index 2: Cr plane, [7:0] Cr
* or
* index 1: Cr plane, [7:0] Cr
* index 2: Cb plane, [7:0] Cb
*/
#define DRM_FORMAT_YUV410 fourcc_code('Y', 'U', 'V', '9') /* 4x4 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU410 fourcc_code('Y', 'V', 'U', '9') /* 4x4 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV411 fourcc_code('Y', 'U', '1', '1') /* 4x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU411 fourcc_code('Y', 'V', '1', '1') /* 4x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV420 fourcc_code('Y', 'U', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU420 fourcc_code('Y', 'V', '1', '2') /* 2x2 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV422 fourcc_code('Y', 'U', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU422 fourcc_code('Y', 'V', '1', '6') /* 2x1 subsampled Cr (1) and Cb (2) planes */
#define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */
#define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */
 
#endif /* DRM_FOURCC_H */
/contrib/sdk/sources/libdrm/include/drm/drm_mode.h
0,0 → 1,490
/*
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com>
* Copyright (c) 2008 Red Hat Inc.
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
* Copyright (c) 2007-2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
 
#ifndef _DRM_MODE_H
#define _DRM_MODE_H
 
#define DRM_DISPLAY_INFO_LEN 32
#define DRM_CONNECTOR_NAME_LEN 32
#define DRM_DISPLAY_MODE_LEN 32
#define DRM_PROP_NAME_LEN 32
 
#define DRM_MODE_TYPE_BUILTIN (1<<0)
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN)
#define DRM_MODE_TYPE_PREFERRED (1<<3)
#define DRM_MODE_TYPE_DEFAULT (1<<4)
#define DRM_MODE_TYPE_USERDEF (1<<5)
#define DRM_MODE_TYPE_DRIVER (1<<6)
 
/* Video mode flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_FLAG_PHSYNC (1<<0)
#define DRM_MODE_FLAG_NHSYNC (1<<1)
#define DRM_MODE_FLAG_PVSYNC (1<<2)
#define DRM_MODE_FLAG_NVSYNC (1<<3)
#define DRM_MODE_FLAG_INTERLACE (1<<4)
#define DRM_MODE_FLAG_DBLSCAN (1<<5)
#define DRM_MODE_FLAG_CSYNC (1<<6)
#define DRM_MODE_FLAG_PCSYNC (1<<7)
#define DRM_MODE_FLAG_NCSYNC (1<<8)
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */
#define DRM_MODE_FLAG_BCAST (1<<10)
#define DRM_MODE_FLAG_PIXMUX (1<<11)
#define DRM_MODE_FLAG_DBLCLK (1<<12)
#define DRM_MODE_FLAG_CLKDIV2 (1<<13)
#define DRM_MODE_FLAG_3D_MASK (0x1f<<14)
#define DRM_MODE_FLAG_3D_NONE (0<<14)
#define DRM_MODE_FLAG_3D_FRAME_PACKING (1<<14)
#define DRM_MODE_FLAG_3D_FIELD_ALTERNATIVE (2<<14)
#define DRM_MODE_FLAG_3D_LINE_ALTERNATIVE (3<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_FULL (4<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH (5<<14)
#define DRM_MODE_FLAG_3D_L_DEPTH_GFX_GFX_DEPTH (6<<14)
#define DRM_MODE_FLAG_3D_TOP_AND_BOTTOM (7<<14)
#define DRM_MODE_FLAG_3D_SIDE_BY_SIDE_HALF (8<<14)
 
 
/* DPMS flags */
/* bit compatible with the xorg definitions. */
#define DRM_MODE_DPMS_ON 0
#define DRM_MODE_DPMS_STANDBY 1
#define DRM_MODE_DPMS_SUSPEND 2
#define DRM_MODE_DPMS_OFF 3
 
/* Scaling mode options */
#define DRM_MODE_SCALE_NONE 0 /* Unmodified timing (display or
software can still scale) */
#define DRM_MODE_SCALE_FULLSCREEN 1 /* Full screen, ignore aspect */
#define DRM_MODE_SCALE_CENTER 2 /* Centered, no scaling */
#define DRM_MODE_SCALE_ASPECT 3 /* Full screen, preserve aspect */
 
/* Dithering mode options */
#define DRM_MODE_DITHERING_OFF 0
#define DRM_MODE_DITHERING_ON 1
#define DRM_MODE_DITHERING_AUTO 2
 
/* Dirty info options */
#define DRM_MODE_DIRTY_OFF 0
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
 
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
 
__u32 vrefresh;
 
__u32 flags;
__u32 type;
char name[DRM_DISPLAY_MODE_LEN];
};
 
struct drm_mode_card_res {
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
 
struct drm_mode_crtc {
__u64 set_connectors_ptr;
__u32 count_connectors;
 
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
 
__u32 x, y; /**< Position on the framebuffer */
 
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_PRESENT_TOP_FIELD (1<<0)
#define DRM_MODE_PRESENT_BOTTOM_FIELD (1<<1)
 
/* Planes blend with or override other bits on the CRTC */
struct drm_mode_set_plane {
__u32 plane_id;
__u32 crtc_id;
__u32 fb_id; /* fb object contains surface format type */
__u32 flags;
 
/* Signed dest location allows it to be partially off screen */
__s32 crtc_x, crtc_y;
__u32 crtc_w, crtc_h;
 
/* Source values are 16.16 fixed point */
__u32 src_x, src_y;
__u32 src_h, src_w;
};
 
struct drm_mode_get_plane {
__u32 plane_id;
 
__u32 crtc_id;
__u32 fb_id;
 
__u32 possible_crtcs;
__u32 gamma_size;
 
__u32 count_format_types;
__u64 format_type_ptr;
};
 
struct drm_mode_get_plane_res {
__u64 plane_id_ptr;
__u32 count_planes;
};
 
#define DRM_MODE_ENCODER_NONE 0
#define DRM_MODE_ENCODER_DAC 1
#define DRM_MODE_ENCODER_TMDS 2
#define DRM_MODE_ENCODER_LVDS 3
#define DRM_MODE_ENCODER_TVDAC 4
 
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type;
 
__u32 crtc_id; /**< Id of crtc */
 
__u32 possible_crtcs;
__u32 possible_clones;
};
 
/* This is for connectors with multiple signal types. */
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */
#define DRM_MODE_SUBCONNECTOR_Automatic 0
#define DRM_MODE_SUBCONNECTOR_Unknown 0
#define DRM_MODE_SUBCONNECTOR_DVID 3
#define DRM_MODE_SUBCONNECTOR_DVIA 4
#define DRM_MODE_SUBCONNECTOR_Composite 5
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6
#define DRM_MODE_SUBCONNECTOR_Component 8
#define DRM_MODE_SUBCONNECTOR_SCART 9
 
#define DRM_MODE_CONNECTOR_Unknown 0
#define DRM_MODE_CONNECTOR_VGA 1
#define DRM_MODE_CONNECTOR_DVII 2
#define DRM_MODE_CONNECTOR_DVID 3
#define DRM_MODE_CONNECTOR_DVIA 4
#define DRM_MODE_CONNECTOR_Composite 5
#define DRM_MODE_CONNECTOR_SVIDEO 6
#define DRM_MODE_CONNECTOR_LVDS 7
#define DRM_MODE_CONNECTOR_Component 8
#define DRM_MODE_CONNECTOR_9PinDIN 9
#define DRM_MODE_CONNECTOR_DisplayPort 10
#define DRM_MODE_CONNECTOR_HDMIA 11
#define DRM_MODE_CONNECTOR_HDMIB 12
#define DRM_MODE_CONNECTOR_TV 13
#define DRM_MODE_CONNECTOR_eDP 14
 
struct drm_mode_get_connector {
 
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
 
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
 
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type;
__u32 connector_type_id;
 
__u32 connection;
__u32 mm_width, mm_height; /**< HxW in millimeters */
__u32 subpixel;
};
 
#define DRM_MODE_PROP_PENDING (1<<0)
#define DRM_MODE_PROP_RANGE (1<<1)
#define DRM_MODE_PROP_IMMUTABLE (1<<2)
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */
#define DRM_MODE_PROP_BLOB (1<<4)
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */
 
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
 
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
 
__u32 prop_id;
__u32 flags;
char name[DRM_PROP_NAME_LEN];
 
__u32 count_values;
__u32 count_enum_blobs;
};
 
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
 
#define DRM_MODE_OBJECT_CRTC 0xcccccccc
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0
#define DRM_MODE_OBJECT_MODE 0xdededede
#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee
 
struct drm_mode_obj_get_properties {
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_props;
__u32 obj_id;
__u32 obj_type;
};
 
struct drm_mode_obj_set_property {
__u64 value;
__u32 prop_id;
__u32 obj_id;
__u32 obj_type;
};
 
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length;
__u64 data;
};
 
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
 
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
 
struct drm_mode_fb_cmd2 {
__u32 fb_id;
__u32 width, height;
__u32 pixel_format; /* fourcc code from drm_fourcc.h */
__u32 flags;
 
/*
* In case of planar formats, this ioctl allows up to 4
* buffer objects with offsets and pitches per plane.
* The pitch and offset order is dictated by the fourcc,
* e.g. NV12 (http://fourcc.org/yuv.php#NV12) is described as:
*
* YUV 4:2:0 image with a plane of 8 bit Y samples
* followed by an interleaved U/V plane containing
* 8 bit 2x2 subsampled colour difference samples.
*
* So it would consist of Y as offset[0] and UV as
* offset[1]. Note that offset[0] will generally
* be 0.
*/
__u32 handles[4];
__u32 pitches[4]; /* pitch for each plane */
__u32 offsets[4]; /* offset of each plane */
};
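 
/*
 * Fill sketch for the NV12 case described above (hypothetical handle and
 * geometry; both planes live in one buffer object here):
 *
 *   struct drm_mode_fb_cmd2 f = {0};
 *   f.width = w;  f.height = h;
 *   f.pixel_format = DRM_FORMAT_NV12;           // from drm_fourcc.h
 *   f.handles[0] = bo;  f.pitches[0] = w;  f.offsets[0] = 0;      // Y
 *   f.handles[1] = bo;  f.pitches[1] = w;  f.offsets[1] = w * h;  // UV
 */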
 
#define DRM_MODE_FB_DIRTY_ANNOTATE_COPY 0x01
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02
#define DRM_MODE_FB_DIRTY_FLAGS 0x03
 
/*
* Mark a region of a framebuffer as dirty.
*
* Some hardware does not automatically update display contents
* when hardware or software draws to a framebuffer. This ioctl
* allows userspace to tell the kernel and the hardware what
* regions of the framebuffer have changed.
*
* The kernel or hardware is free to update more than just the
* region specified by the clip rects. The kernel or hardware
* may also delay and/or coalesce several calls to dirty into a
* single update.
*
* Userspace may annotate the updates; the annotations are a
* promise made by the caller that the change is either a copy
* of pixels or a fill of a single color in the region specified.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_COPY flag is given then
* the number of updated regions is half of the num_clips given,
* where the clip rects are paired in src and dst. The width and
* height of each one of the pairs must match.
*
* If the DRM_MODE_FB_DIRTY_ANNOTATE_FILL flag is given, the caller
* promises that the region specified by the clip rects is filled
* completely with a single color as given in the color argument.
*/
 
struct drm_mode_fb_dirty_cmd {
__u32 fb_id;
__u32 flags;
__u32 color;
__u32 num_clips;
__u64 clips_ptr;
};
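 
/*
 * Single-rect sketch of the scheme above (illustrative fb, w, h): mark
 * the whole framebuffer dirty with no annotation:
 *
 *   struct drm_clip_rect clip = { .x1 = 0, .y1 = 0, .x2 = w, .y2 = h };
 *   struct drm_mode_fb_dirty_cmd dirty = {
 *       .fb_id = fb, .num_clips = 1,
 *       .clips_ptr = (__u64)(unsigned long)&clip,
 *   };
 */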
 
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
 
#define DRM_MODE_CURSOR_BO (1<<0)
#define DRM_MODE_CURSOR_MOVE (1<<1)
 
/*
* Depending on the value in flags, different members are used.
*
* CURSOR_BO uses
* crtc
* width
* height
* handle - if 0, turns the cursor off
*
* CURSOR_MOVE uses
* crtc
* x
* y
*/
struct drm_mode_cursor {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
 
struct drm_mode_cursor2 {
__u32 flags;
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
__s32 hot_x;
__s32 hot_y;
};
 
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size;
 
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
 
#define DRM_MODE_PAGE_FLIP_EVENT 0x01
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_FLAGS (DRM_MODE_PAGE_FLIP_EVENT|DRM_MODE_PAGE_FLIP_ASYNC)
 
/*
* Request a page flip on the specified crtc.
*
* This ioctl will ask KMS to schedule a page flip for the specified
* crtc. Once any pending rendering targeting the specified fb (as of
* ioctl time) has completed, the crtc will be reprogrammed to display
* that fb after the next vertical refresh. The ioctl returns
* immediately, but subsequent rendering to the current fb will block
* in the execbuffer ioctl until the page flip happens. If a page
* flip is already pending as the ioctl is called, EBUSY will be
* returned.
*
* The ioctl supports one flag, DRM_MODE_PAGE_FLIP_EVENT, which will
* request that drm sends back a vblank event (see drm.h: struct
* drm_event_vblank) when the page flip is done. The user_data field
* passed in with this ioctl will be returned as the user_data field
* in the vblank event struct.
*
* The reserved field must be zero until we figure out something
* clever to use it for.
*/
 
struct drm_mode_crtc_page_flip {
__u32 crtc_id;
__u32 fb_id;
__u32 flags;
__u32 reserved;
__u64 user_data;
};
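 
/*
 * Request sketch for the flip described above (illustrative crtc,
 * next_fb, cookie): ask for a completion event; user_data comes back in
 * the drm_event_vblank record:
 *
 *   struct drm_mode_crtc_page_flip flip = {
 *       .crtc_id = crtc, .fb_id = next_fb,
 *       .flags = DRM_MODE_PAGE_FLIP_EVENT,
 *       .user_data = cookie,         // echoed in the event's user_data
 *   };
 */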
 
/* create a dumb scanout buffer */
struct drm_mode_create_dumb {
__u32 height;
__u32 width;
__u32 bpp;
__u32 flags;
/* handle, pitch, size will be returned */
__u32 handle;
__u32 pitch;
__u64 size;
};
 
/* set up for mmap of a dumb scanout buffer */
struct drm_mode_map_dumb {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
 
struct drm_mode_destroy_dumb {
__u32 handle;
};
 
#endif
/contrib/sdk/sources/libdrm/include/drm/i915_drm.h
0,0 → 1,982
/*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef _I915_DRM_H_
#define _I915_DRM_H_
 
#include "drm.h"
 
/* Please note that modifications to all structs defined here are
* subject to backwards-compatibility constraints.
*/
 
 
/* Each region is a minimum of 16k, and there are at most 255 of them.
*/
#define I915_NR_TEX_REGIONS 255 /* table size 2k - maximum due to use
* of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
 
typedef struct _drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
I915_RESUME_DMA = 0x03
} func;
unsigned int mmio_offset;
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
unsigned int ring_size;
unsigned int front_offset;
unsigned int back_offset;
unsigned int depth_offset;
unsigned int w;
unsigned int h;
unsigned int pitch;
unsigned int pitch_bits;
unsigned int back_pitch;
unsigned int depth_pitch;
unsigned int cpp;
unsigned int chipset;
} drm_i915_init_t;
 
typedef struct _drm_i915_sarea {
struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
int ctxOwner; /* last context to upload state */
int texAge;
int pf_enabled; /* is pageflipping allowed? */
int pf_active;
int pf_current_page; /* which buffer is being displayed? */
int perf_boxes; /* performance boxes to be displayed */
int width, height; /* screen size in pixels */
 
drm_handle_t front_handle;
int front_offset;
int front_size;
 
drm_handle_t back_handle;
int back_offset;
int back_size;
 
drm_handle_t depth_handle;
int depth_offset;
int depth_size;
 
drm_handle_t tex_handle;
int tex_offset;
int tex_size;
int log_tex_granularity;
int pitch;
int rotation; /* 0, 90, 180 or 270 */
int rotated_offset;
int rotated_size;
int rotated_pitch;
int virtualX, virtualY;
 
unsigned int front_tiled;
unsigned int back_tiled;
unsigned int depth_tiled;
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
 
int pipeA_x;
int pipeA_y;
int pipeA_w;
int pipeA_h;
int pipeB_x;
int pipeB_y;
int pipeB_w;
int pipeB_h;
 
/* fill out some space for old userspace triple buffer */
drm_handle_t unused_handle;
__u32 unused1, unused2, unused3;
 
/* buffer object handles for static buffers. May change
* over the lifetime of the client.
*/
__u32 front_bo_handle;
__u32 back_bo_handle;
__u32 unused_bo_handle;
__u32 depth_bo_handle;
 
} drm_i915_sarea_t;
 
/* due to userspace building against these headers we need some compat here */
#define planeA_x pipeA_x
#define planeA_y pipeA_y
#define planeA_w pipeA_w
#define planeA_h pipeA_h
#define planeB_x pipeB_x
#define planeB_y pipeB_y
#define planeB_w pipeB_w
#define planeB_h pipeB_h
 
/* Flags for perf_boxes
*/
#define I915_BOX_RING_EMPTY 0x1
#define I915_BOX_FLIP 0x2
#define I915_BOX_WAIT 0x4
#define I915_BOX_TEXTURE_LOAD 0x8
#define I915_BOX_LOST_CONTEXT 0x10
 
/* I915 specific ioctls
* The device specific ioctl range is 0x40 to 0x79.
*/
#define DRM_I915_INIT 0x00
#define DRM_I915_FLUSH 0x01
#define DRM_I915_FLIP 0x02
#define DRM_I915_BATCHBUFFER 0x03
#define DRM_I915_IRQ_EMIT 0x04
#define DRM_I915_IRQ_WAIT 0x05
#define DRM_I915_GETPARAM 0x06
#define DRM_I915_SETPARAM 0x07
#define DRM_I915_ALLOC 0x08
#define DRM_I915_FREE 0x09
#define DRM_I915_INIT_HEAP 0x0a
#define DRM_I915_CMDBUFFER 0x0b
#define DRM_I915_DESTROY_HEAP 0x0c
#define DRM_I915_SET_VBLANK_PIPE 0x0d
#define DRM_I915_GET_VBLANK_PIPE 0x0e
#define DRM_I915_VBLANK_SWAP 0x0f
#define DRM_I915_HWS_ADDR 0x11
#define DRM_I915_GEM_INIT 0x13
#define DRM_I915_GEM_EXECBUFFER 0x14
#define DRM_I915_GEM_PIN 0x15
#define DRM_I915_GEM_UNPIN 0x16
#define DRM_I915_GEM_BUSY 0x17
#define DRM_I915_GEM_THROTTLE 0x18
#define DRM_I915_GEM_ENTERVT 0x19
#define DRM_I915_GEM_LEAVEVT 0x1a
#define DRM_I915_GEM_CREATE 0x1b
#define DRM_I915_GEM_PREAD 0x1c
#define DRM_I915_GEM_PWRITE 0x1d
#define DRM_I915_GEM_MMAP 0x1e
#define DRM_I915_GEM_SET_DOMAIN 0x1f
#define DRM_I915_GEM_SW_FINISH 0x20
#define DRM_I915_GEM_SET_TILING 0x21
#define DRM_I915_GEM_GET_TILING 0x22
#define DRM_I915_GEM_GET_APERTURE 0x23
#define DRM_I915_GEM_MMAP_GTT 0x24
#define DRM_I915_GET_PIPE_FROM_CRTC_ID 0x25
#define DRM_I915_GEM_MADVISE 0x26
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
#define DRM_I915_GEM_CONTEXT_CREATE 0x2d
#define DRM_I915_GEM_CONTEXT_DESTROY 0x2e
#define DRM_I915_GEM_SET_CACHEING 0x2f
#define DRM_I915_GEM_GET_CACHEING 0x30
#define DRM_I915_REG_READ 0x31
#define DRM_I915_GET_RESET_STATS 0x32
 
#define DRM_IOCTL_I915_INIT
#define DRM_IOCTL_I915_FLUSH
#define DRM_IOCTL_I915_FLIP
#define DRM_IOCTL_I915_BATCHBUFFER
#define DRM_IOCTL_I915_IRQ_EMIT
#define DRM_IOCTL_I915_IRQ_WAIT
#define DRM_IOCTL_I915_GETPARAM SRV_I915_GET_PARAM
#define DRM_IOCTL_I915_SETPARAM
#define DRM_IOCTL_I915_ALLOC
#define DRM_IOCTL_I915_FREE
#define DRM_IOCTL_I915_INIT_HEAP
#define DRM_IOCTL_I915_CMDBUFFER
#define DRM_IOCTL_I915_DESTROY_HEAP
#define DRM_IOCTL_I915_SET_VBLANK_PIPE
#define DRM_IOCTL_I915_GET_VBLANK_PIPE
#define DRM_IOCTL_I915_VBLANK_SWAP
#define DRM_IOCTL_I915_HWS_ADDR
#define DRM_IOCTL_I915_GEM_INIT
#define DRM_IOCTL_I915_GEM_EXECBUFFER
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 SRV_I915_GEM_EXECBUFFER2
#define DRM_IOCTL_I915_GEM_PIN SRV_I915_GEM_PIN
#define DRM_IOCTL_I915_GEM_UNPIN SRV_I915_GEM_UNPIN
#define DRM_IOCTL_I915_GEM_BUSY SRV_I915_GEM_BUSY
#define DRM_IOCTL_I915_GEM_SET_CACHEING SRV_I915_GEM_SET_CACHING
#define DRM_IOCTL_I915_GEM_GET_CACHEING
#define DRM_IOCTL_I915_GEM_THROTTLE SRV_I915_GEM_THROTTLE
#define DRM_IOCTL_I915_GEM_ENTERVT
#define DRM_IOCTL_I915_GEM_LEAVEVT
#define DRM_IOCTL_I915_GEM_CREATE SRV_I915_GEM_CREATE
#define DRM_IOCTL_I915_GEM_PREAD
#define DRM_IOCTL_I915_GEM_PWRITE SRV_I915_GEM_PWRITE
#define DRM_IOCTL_I915_GEM_MMAP SRV_I915_GEM_MMAP
#define DRM_IOCTL_I915_GEM_MMAP_GTT SRV_I915_GEM_MMAP_GTT
#define DRM_IOCTL_I915_GEM_SET_DOMAIN SRV_I915_GEM_SET_DOMAIN
#define DRM_IOCTL_I915_GEM_SW_FINISH
#define DRM_IOCTL_I915_GEM_SET_TILING SRV_I915_GEM_SET_TILING
#define DRM_IOCTL_I915_GEM_GET_TILING SRV_I915_GEM_GET_TILING
#define DRM_IOCTL_I915_GEM_GET_APERTURE SRV_I915_GEM_GET_APERTURE
#define DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID
#define DRM_IOCTL_I915_GEM_MADVISE
#define DRM_IOCTL_I915_OVERLAY_PUT_IMAGE
#define DRM_IOCTL_I915_OVERLAY_ATTRS
#define DRM_IOCTL_I915_SET_SPRITE_COLORKEY
#define DRM_IOCTL_I915_GET_SPRITE_COLORKEY
#define DRM_IOCTL_I915_GEM_WAIT SRV_I915_GEM_WAIT
#define DRM_IOCTL_I915_GEM_CONTEXT_CREATE SRV_I915_GEM_CONTEXT_CREATE
#define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY SRV_I915_GEM_CONTEXT_DESTROY
#define DRM_IOCTL_I915_REG_READ SRV_I915_REG_READ
 
 
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
typedef struct drm_i915_batchbuffer {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
 
/* As above, but pass a pointer to userspace buffer which can be
* validated by the kernel prior to sending to hardware.
*/
typedef struct _drm_i915_cmdbuffer {
char *buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
int num_cliprects; /* multipass with multiple cliprects? */
struct drm_clip_rect *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
 
/* Userspace can request & wait on IRQs:
*/
typedef struct drm_i915_irq_emit {
int *irq_seq;
} drm_i915_irq_emit_t;
 
typedef struct drm_i915_irq_wait {
int irq_seq;
} drm_i915_irq_wait_t;
 
/* Ioctl to query kernel params:
*/
#define I915_PARAM_IRQ_ACTIVE 1
#define I915_PARAM_ALLOW_BATCHBUFFER 2
#define I915_PARAM_LAST_DISPATCH 3
#define I915_PARAM_CHIPSET_ID 4
#define I915_PARAM_HAS_GEM 5
#define I915_PARAM_NUM_FENCES_AVAIL 6
#define I915_PARAM_HAS_OVERLAY 7
#define I915_PARAM_HAS_PAGEFLIPPING 8
#define I915_PARAM_HAS_EXECBUF2 9
#define I915_PARAM_HAS_BSD 10
#define I915_PARAM_HAS_BLT 11
#define I915_PARAM_HAS_RELAXED_FENCING 12
#define I915_PARAM_HAS_COHERENT_RINGS 13
#define I915_PARAM_HAS_EXEC_CONSTANTS 14
#define I915_PARAM_HAS_RELAXED_DELTA 15
#define I915_PARAM_HAS_GEN7_SOL_RESET 16
#define I915_PARAM_HAS_LLC 17
#define I915_PARAM_HAS_ALIASING_PPGTT 18
#define I915_PARAM_HAS_WAIT_TIMEOUT 19
#define I915_PARAM_HAS_VEBOX 22
 
typedef struct drm_i915_getparam {
int param;
int *value;
} drm_i915_getparam_t;
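 
/*
 * Query sketch (assumes an open fd; DRM_IOCTL_I915_GETPARAM maps to
 * SRV_I915_GET_PARAM in this port):
 *
 *   int chipset = 0;
 *   drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID,
 *                              .value = &chipset };
 *   drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */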
 
/* Ioctl to set kernel params:
*/
#define I915_SETPARAM_USE_MI_BATCHBUFFER_START 1
#define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2
#define I915_SETPARAM_ALLOW_BATCHBUFFER 3
#define I915_SETPARAM_NUM_USED_FENCES 4
 
typedef struct drm_i915_setparam {
int param;
int value;
} drm_i915_setparam_t;
 
/* A memory manager for regions of shared memory:
*/
#define I915_MEM_REGION_AGP 1
 
typedef struct drm_i915_mem_alloc {
int region;
int alignment;
int size;
int *region_offset; /* offset from start of fb or agp */
} drm_i915_mem_alloc_t;
 
typedef struct drm_i915_mem_free {
int region;
int region_offset;
} drm_i915_mem_free_t;
 
typedef struct drm_i915_mem_init_heap {
int region;
int size;
int start;
} drm_i915_mem_init_heap_t;
 
/* Allow memory manager to be torn down and re-initialized (e.g. on
* rotate):
*/
typedef struct drm_i915_mem_destroy_heap {
int region;
} drm_i915_mem_destroy_heap_t;
 
/* Allow X server to configure which pipes to monitor for vblank signals
*/
#define DRM_I915_VBLANK_PIPE_A 1
#define DRM_I915_VBLANK_PIPE_B 2
 
typedef struct drm_i915_vblank_pipe {
int pipe;
} drm_i915_vblank_pipe_t;
 
/* Schedule buffer swap at given vertical blank:
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
 
typedef struct drm_i915_hws_addr {
__u64 addr;
} drm_i915_hws_addr_t;
 
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_start;
/**
* Ending offset in the GTT to be managed by the DRM memory
* manager.
*/
__u64 gtt_end;
};
 
struct drm_i915_gem_create {
/**
* Requested size for the object.
*
* The (page-aligned) allocated size for the object will be returned.
*/
__u64 size;
/**
* Returned handle for the object.
*
* Object handles are nonzero.
*/
__u32 handle;
__u32 pad;
};
 
struct drm_i915_gem_pread {
/** Handle for the object being read. */
__u32 handle;
__u32 pad;
/** Offset into the object to read from */
__u64 offset;
/** Length of data to read */
__u64 size;
/**
* Pointer to write the data into.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
 
struct drm_i915_gem_pwrite {
/** Handle for the object being written to. */
__u32 handle;
__u32 pad;
/** Offset into the object to write to */
__u64 offset;
/** Length of data to write */
__u64 size;
/**
* Pointer to read the data from.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 data_ptr;
};
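 
/* Illustrative sketch, not part of the original header: uploading data with
* the PWRITE ioctl. Assumes an open DRM fd, a handle obtained from
* GEM_CREATE, and the DRM_IOCTL_I915_GEM_PWRITE define from earlier in this
* file; `src` and `len` are placeholders:
*
*	struct drm_i915_gem_pwrite pw;
*	memset(&pw, 0, sizeof(pw));
*	pw.handle = handle;
*	pw.offset = 0;
*	pw.size = len;
*	pw.data_ptr = (uintptr_t)src;
*	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw);
*
* The user pointer travels as a fixed-size integer so the same struct
* layout works for 32- and 64-bit userspace.
*/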
 
struct drm_i915_gem_mmap {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/** Offset in the object to map. */
__u64 offset;
/**
* Length of data to map.
*
* The value will be page-aligned.
*/
__u64 size;
/**
* Returned pointer the data was mapped at.
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 addr_ptr;
};
 
struct drm_i915_gem_mmap_gtt {
/** Handle for the object being mapped. */
__u32 handle;
__u32 pad;
/**
* Fake offset to use for subsequent mmap call
*
* This is a fixed-size type for 32/64 compatibility.
*/
__u64 offset;
};
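 
/* Illustrative sketch, not part of the original header: mapping through the
* GTT is a two-step operation. The ioctl only hands back a fake offset; the
* actual mapping comes from a subsequent mmap() of the DRM fd at that
* offset (shown for a POSIX-style environment; a KolibriOS port would
* substitute its own mapping call):
*
*	struct drm_i915_gem_mmap_gtt mg;
*	memset(&mg, 0, sizeof(mg));
*	mg.handle = handle;
*	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg) == 0)
*		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
*			MAP_SHARED, fd, mg.offset);
*/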
 
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
 
/** New read domains */
__u32 read_domains;
 
/** New write domain */
__u32 write_domain;
};
 
struct drm_i915_gem_sw_finish {
/** Handle for the object */
__u32 handle;
};
 
struct drm_i915_gem_relocation_entry {
/**
* Handle of the buffer being pointed to by this relocation entry.
*
* It's appealing to make this an index into the mm_validate_entry
* list to refer to the buffer, but using a handle instead lets the
* driver create a relocation list for state buffers once and not
* re-write it per exec using the buffer.
*/
__u32 target_handle;
 
/**
* Value to be added to the offset of the target buffer to make up
* the relocation entry.
*/
__u32 delta;
 
/** Offset in the buffer the relocation entry will be written into */
__u64 offset;
 
/**
* Offset value of the target buffer that the relocation entry was last
* written as.
*
* If the buffer has the same offset as last time, we can skip syncing
* and writing the relocation. This value is written back out by
* the execbuffer ioctl when the relocation is written.
*/
__u64 presumed_offset;
 
/**
* Target memory domains read by this operation.
*/
__u32 read_domains;
 
/**
* Target memory domains written by this operation.
*
* Note that only one domain may be written by the whole
* execbuffer operation, so that where there are conflicts,
* the application will get -EINVAL back.
*/
__u32 write_domain;
};
 
/** @{
* Intel memory domains
*
* Most of these just align with the various caches in
* the system and are used to flush and invalidate as
* objects end up cached in different domains.
*/
/** CPU cache */
#define I915_GEM_DOMAIN_CPU 0x00000001
/** Render cache, used by 2D and 3D drawing */
#define I915_GEM_DOMAIN_RENDER 0x00000002
/** Sampler cache, used by texture engine */
#define I915_GEM_DOMAIN_SAMPLER 0x00000004
/** Command queue, used to load batch buffers */
#define I915_GEM_DOMAIN_COMMAND 0x00000008
/** Instruction cache, used by shader programs */
#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
/** Vertex address cache */
#define I915_GEM_DOMAIN_VERTEX 0x00000020
/** GTT domain - aperture and scanout */
#define I915_GEM_DOMAIN_GTT 0x00000040
/** @} */
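 
/* Illustrative sketch, not part of the original header: before the CPU
* writes through a CPU mapping, move the object into the CPU domain for
* both read and write; assumes the DRM_IOCTL_I915_GEM_SET_DOMAIN define
* from earlier in this file:
*
*	struct drm_i915_gem_set_domain sd;
*	memset(&sd, 0, sizeof(sd));
*	sd.handle = handle;
*	sd.read_domains = I915_GEM_DOMAIN_CPU;
*	sd.write_domain = I915_GEM_DOMAIN_CPU;
*	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
*/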
 
struct drm_i915_gem_exec_object {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
 
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
 
/** Required alignment in graphics aperture */
__u64 alignment;
 
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
};
 
struct drm_i915_gem_execbuffer {
/**
* List of buffers to be validated with their relocations to be
* performed on them.
*
* This is a pointer to an array of struct drm_i915_gem_validate_entry.
*
* These buffers must be listed in an order such that all relocations
* a buffer is performing refer to buffers that have already appeared
* in the validate list.
*/
__u64 buffers_ptr;
__u32 buffer_count;
 
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
};
 
struct drm_i915_gem_exec_object2 {
/**
* User's handle for a buffer to be bound into the GTT for this
* operation.
*/
__u32 handle;
 
/** Number of relocations to be performed on this buffer */
__u32 relocation_count;
/**
* Pointer to array of struct drm_i915_gem_relocation_entry containing
* the relocations to be performed in this buffer.
*/
__u64 relocs_ptr;
 
/** Required alignment in graphics aperture */
__u64 alignment;
 
/**
* Returned value of the updated offset of the object, for future
* presumed_offset writes.
*/
__u64 offset;
 
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
};
 
struct drm_i915_gem_execbuffer2 {
/**
* List of gem_exec_object2 structs
*/
__u64 buffers_ptr;
__u32 buffer_count;
 
/** Offset in the batchbuffer to start execution from. */
__u32 batch_start_offset;
/** Bytes used in batchbuffer from batch_start_offset */
__u32 batch_len;
__u32 DR1;
__u32 DR4;
__u32 num_cliprects;
/** This is a struct drm_clip_rect *cliprects */
__u64 cliprects_ptr;
#define I915_EXEC_RING_MASK (7<<0)
#define I915_EXEC_DEFAULT (0<<0)
#define I915_EXEC_RENDER (1<<0)
#define I915_EXEC_BSD (2<<0)
#define I915_EXEC_BLT (3<<0)
#define I915_EXEC_VEBOX (4<<0)
 
/* Used for switching the constants addressing mode on gen4+ RENDER ring.
* Gen6+ only supports relative addressing to dynamic state (default) and
* absolute addressing.
*
* These flags are ignored for the BSD and BLT rings.
*/
#define I915_EXEC_CONSTANTS_MASK (3<<6)
#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
__u64 flags;
__u64 rsvd1; /* now used for context info */
__u64 rsvd2;
};
 
/** Resets the SO write offset registers for transform feedback on gen7. */
#define I915_EXEC_GEN7_SOL_RESET (1<<8)
 
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
#define i915_execbuffer2_get_context_id(eb2) \
((eb2).rsvd1 & I915_EXEC_CONTEXT_ID_MASK)
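 
/* Illustrative sketch, not part of the original header: a minimal
* EXECBUFFER2 submission with a single batch object, no relocations and no
* cliprects, targeting the render ring under an existing context. Assumes
* the DRM_IOCTL_I915_GEM_EXECBUFFER2 define from earlier in this file;
* `batch_handle`, `batch_bytes` and `ctx_id` are placeholders, and error
* handling is omitted:
*
*	struct drm_i915_gem_exec_object2 obj;
*	struct drm_i915_gem_execbuffer2 eb;
*	memset(&obj, 0, sizeof(obj));
*	memset(&eb, 0, sizeof(eb));
*	obj.handle = batch_handle;
*	eb.buffers_ptr = (uintptr_t)&obj;
*	eb.buffer_count = 1;
*	eb.batch_len = batch_bytes;
*	eb.flags = I915_EXEC_RENDER;
*	i915_execbuffer2_set_context_id(eb, ctx_id);
*	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &eb);
*/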
 
struct drm_i915_gem_pin {
/** Handle of the buffer to be pinned. */
__u32 handle;
__u32 pad;
 
/** alignment required within the aperture */
__u64 alignment;
 
/** Returned GTT offset of the buffer. */
__u64 offset;
};
 
struct drm_i915_gem_unpin {
/** Handle of the buffer to be unpinned. */
__u32 handle;
__u32 pad;
};
 
struct drm_i915_gem_busy {
/** Handle of the buffer to check for busy */
__u32 handle;
 
/** Return busy status (1 if busy, 0 if idle).
* The high word is used to indicate on which rings the object
* currently resides:
* 16:31 - busy (r or r/w) rings (16 render, 17 bsd, 18 blt, etc)
*/
__u32 busy;
};
 
#define I915_CACHEING_NONE 0
#define I915_CACHEING_CACHED 1
 
struct drm_i915_gem_cacheing {
/**
* Handle of the buffer to set/get the cacheing level of. */
__u32 handle;
 
/**
* Cacheing level to apply or return value
*
* bits0-15 are for generic cacheing control (i.e. the above defined
* values). bits16-31 are reserved for platform-specific variations
* (e.g. l3$ caching on gen7). */
__u32 cacheing;
};
 
#define I915_TILING_NONE 0
#define I915_TILING_X 1
#define I915_TILING_Y 2
 
#define I915_BIT_6_SWIZZLE_NONE 0
#define I915_BIT_6_SWIZZLE_9 1
#define I915_BIT_6_SWIZZLE_9_10 2
#define I915_BIT_6_SWIZZLE_9_11 3
#define I915_BIT_6_SWIZZLE_9_10_11 4
/* Not seen by userland */
#define I915_BIT_6_SWIZZLE_UNKNOWN 5
/* Seen by userland. */
#define I915_BIT_6_SWIZZLE_9_17 6
#define I915_BIT_6_SWIZZLE_9_10_17 7
 
struct drm_i915_gem_set_tiling {
/** Handle of the buffer to have its tiling state updated */
__u32 handle;
 
/**
* Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*
* This value is to be set on request, and will be updated by the
* kernel on successful return with the actual chosen tiling layout.
*
* The tiling mode may be demoted to I915_TILING_NONE when the system
* has bit 6 swizzling that can't be managed correctly by GEM.
*
* Buffer contents become undefined when changing tiling_mode.
*/
__u32 tiling_mode;
 
/**
* Stride in bytes for the object when in I915_TILING_X or
* I915_TILING_Y.
*/
__u32 stride;
 
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
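 
/* Illustrative sketch, not part of the original header: requesting X tiling
* and honouring the kernel's decision, since the mode may be demoted to
* I915_TILING_NONE on machines whose bit-6 swizzling GEM can't manage.
* Assumes the DRM_IOCTL_I915_GEM_SET_TILING define from earlier in this
* file; `handle` and `pitch` are placeholders:
*
*	struct drm_i915_gem_set_tiling st;
*	memset(&st, 0, sizeof(st));
*	st.handle = handle;
*	st.tiling_mode = I915_TILING_X;
*	st.stride = pitch;
*	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &st);
*
* If st.tiling_mode comes back as something other than I915_TILING_X, the
* caller should fall back to a linear layout.
*/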
 
struct drm_i915_gem_get_tiling {
/** Handle of the buffer to get tiling state for. */
__u32 handle;
 
/**
* Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
* I915_TILING_Y).
*/
__u32 tiling_mode;
 
/**
* Returned address bit 6 swizzling required for CPU access through
* mmap mapping.
*/
__u32 swizzle_mode;
};
 
struct drm_i915_gem_get_aperture {
/** Total size of the aperture used by i915_gem_execbuffer, in bytes */
__u64 aper_size;
 
/**
* Available space in the aperture used by i915_gem_execbuffer, in
* bytes
*/
__u64 aper_available_size;
};
 
struct drm_i915_get_pipe_from_crtc_id {
/** ID of CRTC being requested **/
__u32 crtc_id;
 
/** pipe of requested CRTC **/
__u32 pipe;
};
 
#define I915_MADV_WILLNEED 0
#define I915_MADV_DONTNEED 1
#define __I915_MADV_PURGED 2 /* internal state */
 
struct drm_i915_gem_madvise {
/** Handle of the buffer to change the backing store advice */
__u32 handle;
 
/* Advice: either the buffer will be needed again in the near future,
* or won't be and could be discarded under memory pressure.
*/
__u32 madv;
 
/** Whether the backing store still exists. */
__u32 retained;
};
 
/* flags */
#define I915_OVERLAY_TYPE_MASK 0xff
#define I915_OVERLAY_YUV_PLANAR 0x01
#define I915_OVERLAY_YUV_PACKED 0x02
#define I915_OVERLAY_RGB 0x03
 
#define I915_OVERLAY_DEPTH_MASK 0xff00
#define I915_OVERLAY_RGB24 0x1000
#define I915_OVERLAY_RGB16 0x2000
#define I915_OVERLAY_RGB15 0x3000
#define I915_OVERLAY_YUV422 0x0100
#define I915_OVERLAY_YUV411 0x0200
#define I915_OVERLAY_YUV420 0x0300
#define I915_OVERLAY_YUV410 0x0400
 
#define I915_OVERLAY_SWAP_MASK 0xff0000
#define I915_OVERLAY_NO_SWAP 0x000000
#define I915_OVERLAY_UV_SWAP 0x010000
#define I915_OVERLAY_Y_SWAP 0x020000
#define I915_OVERLAY_Y_AND_UV_SWAP 0x030000
 
#define I915_OVERLAY_FLAGS_MASK 0xff000000
#define I915_OVERLAY_ENABLE 0x01000000
 
struct drm_intel_overlay_put_image {
/* various flags and src format description */
__u32 flags;
/* source picture description */
__u32 bo_handle;
/* stride values and offsets are in bytes, buffer relative */
__u16 stride_Y; /* stride for packed formats */
__u16 stride_UV;
__u32 offset_Y; /* offset for packed formats */
__u32 offset_U;
__u32 offset_V;
/* in pixels */
__u16 src_width;
__u16 src_height;
/* to compensate for the scaling factors for partially covered surfaces */
__u16 src_scan_width;
__u16 src_scan_height;
/* output crtc description */
__u32 crtc_id;
__u16 dst_x;
__u16 dst_y;
__u16 dst_width;
__u16 dst_height;
};
 
/* flags */
#define I915_OVERLAY_UPDATE_ATTRS (1<<0)
#define I915_OVERLAY_UPDATE_GAMMA (1<<1)
struct drm_intel_overlay_attrs {
__u32 flags;
__u32 color_key;
__s32 brightness;
__u32 contrast;
__u32 saturation;
__u32 gamma0;
__u32 gamma1;
__u32 gamma2;
__u32 gamma3;
__u32 gamma4;
__u32 gamma5;
};
 
/*
* Intel sprite handling
*
* Color keying works with a min/mask/max tuple. Both source and destination
* color keying is allowed.
*
* Source keying:
* Sprite pixels within the min & max values, masked against the color channels
* specified in the mask field, will be transparent. All other pixels will
* be displayed on top of the primary plane. For RGB surfaces, only the min
* and mask fields will be used; ranged compares are not allowed.
*
* Destination keying:
* Primary plane pixels that match the min value, masked against the color
* channels specified in the mask field, will be replaced by corresponding
* pixels from the sprite plane.
*
* Note that source & destination keying are exclusive; only one can be
* active on a given plane.
*/
 
#define I915_SET_COLORKEY_NONE (1<<0) /* disable color key matching */
#define I915_SET_COLORKEY_DESTINATION (1<<1)
#define I915_SET_COLORKEY_SOURCE (1<<2)
struct drm_intel_sprite_colorkey {
__u32 plane_id;
__u32 min_value;
__u32 channel_mask;
__u32 max_value;
__u32 flags;
};
 
struct drm_i915_gem_wait {
/** Handle of BO we shall wait on */
__u32 bo_handle;
__u32 flags;
/** Number of nanoseconds to wait; the kernel writes back the time remaining. */
__s64 timeout_ns;
};
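 
/* Illustrative sketch, not part of the original header: a bounded
* (one-second) wait for a BO to go idle. Assumes the
* DRM_IOCTL_I915_GEM_WAIT define from earlier in this file; `handle` is a
* placeholder:
*
*	struct drm_i915_gem_wait wait;
*	memset(&wait, 0, sizeof(wait));
*	wait.bo_handle = handle;
*	wait.timeout_ns = 1000 * 1000 * 1000;
*	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
*
* On return, wait.timeout_ns holds the time remaining.
*/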
 
struct drm_i915_gem_context_create {
/* output: id of new context */
__u32 ctx_id;
__u32 pad;
};
 
struct drm_i915_gem_context_destroy {
__u32 ctx_id;
__u32 pad;
};
 
struct drm_i915_reg_read {
__u64 offset;
__u64 val; /* Return value */
};
 
struct drm_i915_reset_stats {
__u32 ctx_id;
__u32 flags;
 
/* All resets since boot/module reload, for all contexts */
__u32 reset_count;
 
/* Number of batches lost when active in GPU, for this context */
__u32 batch_active;
 
/* Number of batches lost pending for execution, for this context */
__u32 batch_pending;
 
__u32 pad;
};
 
struct drm_i915_mask_update {
__u32 handle;
__u32 width;
__u32 height;
__u32 bo_size;
__u32 bo_pitch;
__u32 bo_map;
};
 
struct drm_i915_fb_info {
__u32 name;
__u32 width;
__u32 height;
__u32 pitch;
__u32 tiling;
};
 
#endif /* _I915_DRM_H_ */
/contrib/sdk/sources/libdrm/intel/intel_aub.h
0,0 → 1,153
/*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
 
/** @file intel_aub.h
*
* The AUB file is a file format used by Intel's internal simulation
* and other validation tools. It can be used at various levels by a
* driver to input state to the simulated hardware or a replaying
* debugger.
*
* We choose to dump AUB files using the trace block format for ease
* of implementation -- dump out the blocks of memory as plain blobs
* and insert ring commands to execute the batchbuffer blob.
*/
 
#ifndef _INTEL_AUB_H
#define _INTEL_AUB_H
 
#define AUB_MI_NOOP (0)
#define AUB_MI_BATCH_BUFFER_START (0x31 << 23)
#define AUB_PIPE_CONTROL (0x7a000002)
 
/* DW0: instruction type. */
 
#define CMD_AUB (7 << 29)
 
#define CMD_AUB_HEADER (CMD_AUB | (1 << 23) | (0x05 << 16))
/* DW1 */
# define AUB_HEADER_MAJOR_SHIFT 24
# define AUB_HEADER_MINOR_SHIFT 16
 
#define CMD_AUB_TRACE_HEADER_BLOCK (CMD_AUB | (1 << 23) | (0x41 << 16))
#define CMD_AUB_DUMP_BMP (CMD_AUB | (1 << 23) | (0x9e << 16))
 
/* DW1 */
#define AUB_TRACE_OPERATION_MASK 0x000000ff
#define AUB_TRACE_OP_COMMENT 0x00000000
#define AUB_TRACE_OP_DATA_WRITE 0x00000001
#define AUB_TRACE_OP_COMMAND_WRITE 0x00000002
#define AUB_TRACE_OP_MMIO_WRITE 0x00000003
// operation = TRACE_DATA_WRITE, Type
#define AUB_TRACE_TYPE_MASK 0x0000ff00
#define AUB_TRACE_TYPE_NOTYPE (0 << 8)
#define AUB_TRACE_TYPE_BATCH (1 << 8)
#define AUB_TRACE_TYPE_VERTEX_BUFFER (5 << 8)
#define AUB_TRACE_TYPE_2D_MAP (6 << 8)
#define AUB_TRACE_TYPE_CUBE_MAP (7 << 8)
#define AUB_TRACE_TYPE_VOLUME_MAP (9 << 8)
#define AUB_TRACE_TYPE_1D_MAP (10 << 8)
#define AUB_TRACE_TYPE_CONSTANT_BUFFER (11 << 8)
#define AUB_TRACE_TYPE_CONSTANT_URB (12 << 8)
#define AUB_TRACE_TYPE_INDEX_BUFFER (13 << 8)
#define AUB_TRACE_TYPE_GENERAL (14 << 8)
#define AUB_TRACE_TYPE_SURFACE (15 << 8)
 
 
// operation = TRACE_COMMAND_WRITE, Type =
#define AUB_TRACE_TYPE_RING_HWB (1 << 8)
#define AUB_TRACE_TYPE_RING_PRB0 (2 << 8)
#define AUB_TRACE_TYPE_RING_PRB1 (3 << 8)
#define AUB_TRACE_TYPE_RING_PRB2 (4 << 8)
 
// Address space
#define AUB_TRACE_ADDRESS_SPACE_MASK 0x00ff0000
#define AUB_TRACE_MEMTYPE_GTT (0 << 16)
#define AUB_TRACE_MEMTYPE_LOCAL (1 << 16)
#define AUB_TRACE_MEMTYPE_NONLOCAL (2 << 16)
#define AUB_TRACE_MEMTYPE_PCI (3 << 16)
#define AUB_TRACE_MEMTYPE_GTT_ENTRY (4 << 16)
 
/* DW2 */
 
/**
* aub_state_struct_type enum values are encoded with the top 16 bits
* representing the type to be delivered to the .aub file, and the bottom 16
* bits representing the subtype. This macro performs the encoding.
*/
#define ENCODE_SS_TYPE(type, subtype) (((type) << 16) | (subtype))
 
enum aub_state_struct_type {
AUB_TRACE_VS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 1),
AUB_TRACE_GS_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 2),
AUB_TRACE_CLIP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 3),
AUB_TRACE_SF_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 4),
AUB_TRACE_WM_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 5),
AUB_TRACE_CC_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 6),
AUB_TRACE_CLIP_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 7),
AUB_TRACE_SF_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 8),
AUB_TRACE_CC_VP_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x9),
AUB_TRACE_SAMPLER_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xa),
AUB_TRACE_KERNEL_INSTRUCTIONS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xb),
AUB_TRACE_SCRATCH_SPACE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xc),
AUB_TRACE_SAMPLER_DEFAULT_COLOR = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0xd),
 
AUB_TRACE_SCISSOR_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x15),
AUB_TRACE_BLEND_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x16),
AUB_TRACE_DEPTH_STENCIL_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_GENERAL, 0x17),
 
AUB_TRACE_VERTEX_BUFFER = ENCODE_SS_TYPE(AUB_TRACE_TYPE_VERTEX_BUFFER, 0),
AUB_TRACE_BINDING_TABLE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x100),
AUB_TRACE_SURFACE_STATE = ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x200),
AUB_TRACE_VS_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 0),
AUB_TRACE_WM_CONSTANTS = ENCODE_SS_TYPE(AUB_TRACE_TYPE_CONSTANT_BUFFER, 1),
};
 
#undef ENCODE_SS_TYPE
 
/**
* Decode an aub_state_struct_type value to determine the type that should be
* stored in the .aub file.
*/
static inline uint32_t AUB_TRACE_TYPE(enum aub_state_struct_type ss_type)
{
return (ss_type & 0xFFFF0000) >> 16;
}
 
/**
* Decode an aub_state_struct_type value to determine the subtype that should be
* stored in the .aub file.
*/
static inline uint32_t AUB_TRACE_SUBTYPE(enum aub_state_struct_type ss_type)
{
return ss_type & 0xFFFF;
}
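 
/* For example, tracing through the macros above: AUB_TRACE_SURFACE_STATE is
* ENCODE_SS_TYPE(AUB_TRACE_TYPE_SURFACE, 0x200), so
*
*	AUB_TRACE_TYPE(AUB_TRACE_SURFACE_STATE) == AUB_TRACE_TYPE_SURFACE
*	AUB_TRACE_SUBTYPE(AUB_TRACE_SURFACE_STATE) == 0x200
*
* i.e. the decoders simply peel the two half-words apart again.
*/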
 
/* DW3: address */
/* DW4: len */
 
#endif /* _INTEL_AUB_H */
/contrib/sdk/sources/libdrm/intel/intel_bufmgr.c
0,0 → 1,316
/*
* Copyright © 2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
 
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
 
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <errno.h>
#include <drm.h>
#include <i915_drm.h>
//#include <pciaccess.h>
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "xf86drm.h"
 
/** @file intel_bufmgr.c
*
* Convenience functions for buffer management methods.
*/
 
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment)
{
return bufmgr->bo_alloc(bufmgr, name, size, alignment);
}
 
#if 0
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
{
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
}
#endif
 
drm_intel_bo *
drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
return bufmgr->bo_alloc_tiled(bufmgr, name, x, y, cpp,
tiling_mode, pitch, flags);
}
 
void drm_intel_bo_reference(drm_intel_bo *bo)
{
bo->bufmgr->bo_reference(bo);
}
 
void drm_intel_bo_unreference(drm_intel_bo *bo)
{
if (bo == NULL)
return;
 
bo->bufmgr->bo_unreference(bo);
}
 
int drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
{
return buf->bufmgr->bo_map(buf, write_enable);
}
 
int drm_intel_bo_unmap(drm_intel_bo *buf)
{
return buf->bufmgr->bo_unmap(buf);
}
 
int
drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
return bo->bufmgr->bo_subdata(bo, offset, size, data);
}
 
int
drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
int ret;
// if (bo->bufmgr->bo_get_subdata)
// return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
 
if (size == 0 || data == NULL)
return 0;
 
ret = drm_intel_bo_map(bo, 0);
if (ret)
return ret;
memcpy(data, (unsigned char *)bo->virtual + offset, size);
drm_intel_bo_unmap(bo);
return 0;
}
 
void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
{
bo->bufmgr->bo_wait_rendering(bo);
}
 
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
{
bufmgr->destroy(bufmgr);
}
 
int
drm_intel_bo_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
{
return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
}
 
int
drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int rings)
{
if (bo->bufmgr->bo_mrb_exec)
return bo->bufmgr->bo_mrb_exec(bo, used,
cliprects, num_cliprects, DR4,
rings);
 
switch (rings) {
case I915_EXEC_DEFAULT:
case I915_EXEC_RENDER:
return bo->bufmgr->bo_exec(bo, used,
cliprects, num_cliprects, DR4);
default:
return -ENODEV;
}
}
 
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
{
bufmgr->debug = enable_debug;
}
 
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
{
return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
}
 
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
if (bo->bufmgr->bo_flink)
return bo->bufmgr->bo_flink(bo, name);
 
return -ENODEV;
}
 
int
drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
return bo->bufmgr->bo_emit_reloc(bo, offset,
target_bo, target_offset,
read_domains, write_domain);
}
 
/* For fence registers, not GL fences */
int
drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
return bo->bufmgr->bo_emit_reloc_fence(bo, offset,
target_bo, target_offset,
read_domains, write_domain);
}
 
 
int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
if (bo->bufmgr->bo_pin)
return bo->bufmgr->bo_pin(bo, alignment);
 
return -ENODEV;
}
 
int drm_intel_bo_unpin(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_unpin)
return bo->bufmgr->bo_unpin(bo);
 
return -ENODEV;
}
 
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
if (bo->bufmgr->bo_set_tiling)
return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
 
*tiling_mode = I915_TILING_NONE;
return 0;
}
 
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode)
{
if (bo->bufmgr->bo_get_tiling)
return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
 
*tiling_mode = I915_TILING_NONE;
*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
return 0;
}
 
int drm_intel_bo_disable_reuse(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_disable_reuse)
return bo->bufmgr->bo_disable_reuse(bo);
return 0;
}
 
int drm_intel_bo_is_reusable(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_is_reusable)
return bo->bufmgr->bo_is_reusable(bo);
return 0;
}
 
int drm_intel_bo_busy(drm_intel_bo *bo)
{
if (bo->bufmgr->bo_busy)
return bo->bufmgr->bo_busy(bo);
return 0;
}
 
int drm_intel_bo_madvise(drm_intel_bo *bo, int madv)
{
if (bo->bufmgr->bo_madvise)
return bo->bufmgr->bo_madvise(bo, madv);
return -1;
}
 
int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
return bo->bufmgr->bo_references(bo, target_bo);
}
 
 
 
#if 0
static size_t
drm_intel_probe_agp_aperture_size(int fd)
{
struct pci_device *pci_dev;
size_t size = 0;
int ret;
 
ret = pci_system_init();
if (ret)
goto err;
 
/* XXX handle multiple adaptors? */
pci_dev = pci_device_find_by_slot(0, 0, 2, 0);
if (pci_dev == NULL)
goto err;
 
ret = pci_device_probe(pci_dev);
if (ret)
goto err;
 
size = pci_dev->regions[2].size;
err:
pci_system_cleanup ();
return size;
}
#endif
 
int drm_intel_get_aperture_sizes(int fd,
size_t *mappable,
size_t *total)
{
 
struct drm_i915_gem_get_aperture aperture;
int ret;
 
ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
if (ret)
return ret;
 
/* XXX add a query for the kernel value? */
*mappable = 512 * 1024 * 1024; /* minimum possible value */
*total = aperture.aper_size;
return 0;
}
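 
/* Illustrative sketch of how a caller might size its working set against
* the mappable aperture; `working_set` is a placeholder for the caller's
* own bookkeeping:
*
*	size_t mappable, total;
*	if (drm_intel_get_aperture_sizes(fd, &mappable, &total) == 0 &&
*		working_set > mappable / 2)
*		start_evicting_or_streaming_buffers();
*/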
/contrib/sdk/sources/libdrm/intel/intel_bufmgr.h
0,0 → 1,301
/*
* Copyright © 2008-2012 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
 
/**
* @file intel_bufmgr.h
*
* Public definitions of Intel-specific bufmgr functions.
*/
 
#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H
 
#include <stdio.h>
#include <stdint.h>
 
struct drm_clip_rect;
 
typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
typedef struct _drm_intel_context drm_intel_context;
typedef struct _drm_intel_bo drm_intel_bo;
 
struct _drm_intel_bo {
/**
* Size in bytes of the buffer object.
*
* The size may be larger than the size originally requested for the
* allocation, such as being aligned to page size.
*/
unsigned long size;
 
/**
* Alignment requirement for object
*
* Used for GTT mapping & pinning the object.
*/
unsigned long align;
 
/**
* Last seen card virtual address (offset from the beginning of the
* aperture) for the object. This should be used to fill relocation
* entries when calling drm_intel_bo_emit_reloc()
*/
unsigned long offset;
 
/**
* Virtual address for accessing the buffer data. Only valid while
* mapped.
*/
#ifdef __cplusplus
void *virt;
#else
void *virtual;
#endif
 
/** Buffer manager context associated with this buffer object */
drm_intel_bufmgr *bufmgr;
 
/**
* MM-specific handle for accessing object
*/
int handle;
};
 
enum aub_dump_bmp_format {
AUB_DUMP_BMP_FORMAT_8BIT = 1,
AUB_DUMP_BMP_FORMAT_ARGB_4444 = 4,
AUB_DUMP_BMP_FORMAT_ARGB_0888 = 6,
AUB_DUMP_BMP_FORMAT_ARGB_8888 = 7,
};
 
typedef struct _drm_intel_aub_annotation {
uint32_t type;
uint32_t subtype;
uint32_t ending_offset;
} drm_intel_aub_annotation;
 
#define BO_ALLOC_FOR_RENDER (1<<0)
 
drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment);
drm_intel_bo *drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
void drm_intel_bo_reference(drm_intel_bo *bo);
void drm_intel_bo_unreference(drm_intel_bo *bo);
int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
int drm_intel_bo_unmap(drm_intel_bo *bo);
 
int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data);
int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data);
void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
 
void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
int drm_intel_bo_exec(drm_intel_bo *bo, int used,
struct drm_clip_rect *cliprects, int num_cliprects, int DR4);
int drm_intel_bo_mrb_exec(drm_intel_bo *bo, int used,
struct drm_clip_rect *cliprects, int num_cliprects, int DR4,
unsigned int flags);
int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);
 
int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int drm_intel_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo,
uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
int drm_intel_bo_unpin(drm_intel_bo *bo);
int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
int drm_intel_bo_busy(drm_intel_bo *bo);
int drm_intel_bo_madvise(drm_intel_bo *bo, int madv);
 
int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
int drm_intel_bo_is_reusable(drm_intel_bo *bo);
int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo);
 
/* drm_intel_bufmgr_gem.c */
drm_intel_bufmgr *drm_intel_bufmgr_gem_init(int fd, int batch_size);
drm_intel_bo *drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle);
drm_intel_bo *
bo_create_from_gem_handle(drm_intel_bufmgr *bufmgr,
unsigned int size, unsigned int handle);
 
void drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr,
int limit);
int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo);
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo);
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo);
 
int drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo);
void drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start);
void drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable);
 
void
drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
const char *filename);
void drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable);
void drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
int pitch, int offset);
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count);
 
int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id);
 
int drm_intel_get_aperture_sizes(int fd, size_t *mappable, size_t *total);
int drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr);
int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns);
 
drm_intel_context *drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr);
void drm_intel_gem_context_destroy(drm_intel_context *ctx);
int drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags);
 
int drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd);
drm_intel_bo *drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr,
int prime_fd, int size);
 
/* drm_intel_bufmgr_fake.c */
drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
unsigned long low_offset,
void *low_virtual,
unsigned long size,
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
volatile unsigned int
*last_dispatch);
void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
int (*exec) (drm_intel_bo *bo,
unsigned int used,
void *priv),
void *priv);
void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
unsigned int (*emit) (void *priv),
void (*wait) (unsigned int fence,
void *priv),
void *priv);
drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long offset,
unsigned long size, void *virt);
void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
void (*invalidate_cb) (drm_intel_bo
* bo,
void *ptr),
void *ptr);
 
void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
 
struct drm_intel_decode *drm_intel_decode_context_alloc(uint32_t devid);
void drm_intel_decode_context_free(struct drm_intel_decode *ctx);
void drm_intel_decode_set_batch_pointer(struct drm_intel_decode *ctx,
void *data, uint32_t hw_offset,
int count);
void drm_intel_decode_set_dump_past_end(struct drm_intel_decode *ctx,
int dump_past_end);
void drm_intel_decode_set_head_tail(struct drm_intel_decode *ctx,
uint32_t head, uint32_t tail);
void drm_intel_decode_set_output_file(struct drm_intel_decode *ctx, FILE *out);
void drm_intel_decode(struct drm_intel_decode *ctx);
 
int drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result);
 
int drm_intel_get_reset_stats(drm_intel_context *ctx,
uint32_t *reset_count,
uint32_t *active,
uint32_t *pending);
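 
/* Illustrative sketch, not part of the original header, of the typical
* bufmgr lifecycle using the declarations above; `fd`, `len` and `data`
* are placeholders, and 16384 is simply a plausible batch size:
*
*	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16384);
*	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
*	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", len, 4096);
*	drm_intel_bo_subdata(bo, 0, len, data);
*	(emit relocations, then drm_intel_bo_exec() the batch)
*	drm_intel_bo_unreference(bo);
*	drm_intel_bufmgr_destroy(bufmgr);
*/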
 
/** @{ Compatibility defines to keep old code building despite the symbol rename
* from dri_* to drm_intel_*
*/
#define dri_bo drm_intel_bo
#define dri_bufmgr drm_intel_bufmgr
#define dri_bo_alloc drm_intel_bo_alloc
#define dri_bo_reference drm_intel_bo_reference
#define dri_bo_unreference drm_intel_bo_unreference
#define dri_bo_map drm_intel_bo_map
#define dri_bo_unmap drm_intel_bo_unmap
#define dri_bo_subdata drm_intel_bo_subdata
#define dri_bo_get_subdata drm_intel_bo_get_subdata
#define dri_bo_wait_rendering drm_intel_bo_wait_rendering
#define dri_bufmgr_set_debug drm_intel_bufmgr_set_debug
#define dri_bufmgr_destroy drm_intel_bufmgr_destroy
#define dri_bo_exec drm_intel_bo_exec
#define dri_bufmgr_check_aperture_space drm_intel_bufmgr_check_aperture_space
#define dri_bo_emit_reloc(reloc_bo, read, write, target_offset, \
reloc_offset, target_bo) \
drm_intel_bo_emit_reloc(reloc_bo, reloc_offset, \
target_bo, target_offset, \
read, write);
#define dri_bo_pin drm_intel_bo_pin
#define dri_bo_unpin drm_intel_bo_unpin
#define dri_bo_get_tiling drm_intel_bo_get_tiling
#define dri_bo_set_tiling(bo, mode) drm_intel_bo_set_tiling(bo, mode, 0)
#define dri_bo_flink drm_intel_bo_flink
#define intel_bufmgr_gem_init drm_intel_bufmgr_gem_init
#define intel_bo_gem_create_from_name drm_intel_bo_gem_create_from_name
#define intel_bufmgr_gem_enable_reuse drm_intel_bufmgr_gem_enable_reuse
#define intel_bufmgr_fake_init drm_intel_bufmgr_fake_init
#define intel_bufmgr_fake_set_last_dispatch drm_intel_bufmgr_fake_set_last_dispatch
#define intel_bufmgr_fake_set_exec_callback drm_intel_bufmgr_fake_set_exec_callback
#define intel_bufmgr_fake_set_fence_callback drm_intel_bufmgr_fake_set_fence_callback
#define intel_bo_fake_alloc_static drm_intel_bo_fake_alloc_static
#define intel_bo_fake_disable_backing_store drm_intel_bo_fake_disable_backing_store
#define intel_bufmgr_fake_contended_lock_take drm_intel_bufmgr_fake_contended_lock_take
#define intel_bufmgr_fake_evict_all drm_intel_bufmgr_fake_evict_all
 
/** @} */
 
#endif /* INTEL_BUFMGR_H */
/contrib/sdk/sources/libdrm/intel/intel_bufmgr_gem.c
0,0 → 1,3246
/**************************************************************************
*
* Copyright © 2007 Red Hat Inc.
* Copyright © 2007-2012 Intel Corporation
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
* Eric Anholt <eric@anholt.net>
* Dave Airlie <airlied@linux.ie>
*/
 
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
 
#include <xf86drm.h>
#include <xf86atomic.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <assert.h>
//#include <pthread.h>
#include <stdbool.h>
 
#include "errno.h"
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif
#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"
#include "intel_aub.h"
#include "string.h"
 
#include "i915_drm.h"
 
#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#else
#define VG(x)
#endif
 
#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s)))
 
#if 0
#define DBG(...) do { \
if (bufmgr_gem->bufmgr.debug) \
fprintf(stderr, __VA_ARGS__); \
} while (0)
#endif
 
//#define DBG(...) fprintf(stderr, __VA_ARGS__)
#define DBG(...)
 
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
 
typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
 
struct drm_intel_gem_bo_bucket {
drmMMListHead head;
unsigned long size;
};
 
typedef struct _drm_intel_bufmgr_gem {
drm_intel_bufmgr bufmgr;
 
int fd;
 
int max_relocs;
 
// pthread_mutex_t lock;
 
struct drm_i915_gem_exec_object *exec_objects;
struct drm_i915_gem_exec_object2 *exec2_objects;
drm_intel_bo **exec_bos;
int exec_size;
int exec_count;
 
/** Array of lists of cached gem objects of power-of-two sizes */
struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
int num_buckets;
time_t time;
 
drmMMListHead named;
drmMMListHead vma_cache;
int vma_count, vma_open, vma_max;
 
uint64_t gtt_size;
int available_fences;
int pci_device;
int gen;
unsigned int has_bsd : 1;
unsigned int has_blt : 1;
unsigned int has_relaxed_fencing : 1;
unsigned int has_llc : 1;
unsigned int has_wait_timeout : 1;
unsigned int bo_reuse : 1;
unsigned int no_exec : 1;
unsigned int has_vebox : 1;
bool fenced_relocs;
 
char *aub_filename;
FILE *aub_file;
uint32_t aub_offset;
} drm_intel_bufmgr_gem;
 
#define DRM_INTEL_RELOC_FENCE (1<<0)
 
typedef struct _drm_intel_reloc_target_info {
drm_intel_bo *bo;
int flags;
} drm_intel_reloc_target;
 
struct _drm_intel_bo_gem {
drm_intel_bo bo;
 
atomic_t refcount;
uint32_t gem_handle;
const char *name;
 
/**
* Kernel-assigned global name for this object
*
* List contains both flink named and prime fd'd objects
*/
unsigned int global_name;
drmMMListHead name_list;
 
/**
* Index of the buffer within the validation list while preparing a
* batchbuffer execution.
*/
int validate_index;
 
/**
* Current tiling mode
*/
uint32_t tiling_mode;
uint32_t swizzle_mode;
unsigned long stride;
 
time_t free_time;
 
/** Array passed to the DRM containing relocation information. */
struct drm_i915_gem_relocation_entry *relocs;
/**
* Array of info structs corresponding to relocs[i].target_handle etc.
*/
drm_intel_reloc_target *reloc_target_info;
/** Number of entries in relocs */
int reloc_count;
/** Mapped address for the buffer, saved across map/unmap cycles */
void *mem_virtual;
/** GTT virtual address for the buffer, saved across map/unmap cycles */
void *gtt_virtual;
int map_count;
drmMMListHead vma_list;
 
/** BO cache list */
drmMMListHead head;
 
/**
* Boolean of whether this BO and its children have been included in
* the current drm_intel_bufmgr_check_aperture_space() total.
*/
bool included_in_check_aperture;
 
/**
* Boolean of whether this buffer has been used as a relocation
* target and had its size accounted for, and thus can't have any
* further relocations added to it.
*/
bool used_as_reloc_target;
 
/**
* Boolean of whether we have encountered an error whilst building the relocation tree.
*/
bool has_error;
 
/**
* Boolean of whether this buffer can be re-used
*/
bool reusable;
 
/**
* Size in bytes of this buffer and its relocation descendants.
*
* Used to avoid costly tree walking in
* drm_intel_bufmgr_check_aperture in the common case.
*/
int reloc_tree_size;
 
/**
* Number of potential fence registers required by this buffer and its
* relocations.
*/
int reloc_tree_fences;
 
/** Whether we may need to do the SW_FINISH ioctl on unmap. */
bool mapped_cpu_write;
 
uint32_t aub_offset;
 
drm_intel_aub_annotation *aub_annotations;
unsigned aub_annotation_count;
};
 
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
 
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
 
static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
 
static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
uint32_t tiling_mode,
uint32_t stride);
 
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
time_t time);
 
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
 
static void drm_intel_gem_bo_free(drm_intel_bo *bo);
 
static unsigned long
drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
uint32_t *tiling_mode)
{
unsigned long min_size, max_size;
unsigned long i;
 
if (*tiling_mode == I915_TILING_NONE)
return size;
 
/* 965+ just need multiples of page size for tiling */
if (bufmgr_gem->gen >= 4)
return ROUND_UP_TO(size, 4096);
 
/* Older chips need powers of two, of at least 512k or 1M */
if (bufmgr_gem->gen == 3) {
min_size = 1024*1024;
max_size = 128*1024*1024;
} else {
min_size = 512*1024;
max_size = 64*1024*1024;
}
 
if (size > max_size) {
*tiling_mode = I915_TILING_NONE;
return size;
}
 
/* Do we need to allocate every page for the fence? */
if (bufmgr_gem->has_relaxed_fencing)
return ROUND_UP_TO(size, 4096);
 
for (i = min_size; i < size; i <<= 1)
;
 
return i;
}
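 
/* Worked example of the rounding above: on gen3 without relaxed fencing, a
* 1.5 MiB tiled request walks the power-of-two loop from the 1 MiB minimum
* up to 2 MiB, whereas on gen4+ the same request merely rounds up to the
* next 4096-byte page boundary. */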
 
/*
* Round a given pitch up to the minimum required for X tiling on a
* given chip. We use 512 as the minimum to allow for a later tiling
* change.
*/
static unsigned long
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
unsigned long pitch, uint32_t *tiling_mode)
{
unsigned long tile_width;
unsigned long i;
 
/* If untiled, then just align it so that we can do rendering
* to it with the 3D engine.
*/
if (*tiling_mode == I915_TILING_NONE)
return ALIGN(pitch, 64);
 
if (*tiling_mode == I915_TILING_X
|| (IS_915(bufmgr_gem->pci_device)
&& *tiling_mode == I915_TILING_Y))
tile_width = 512;
else
tile_width = 128;
 
/* 965 is flexible */
if (bufmgr_gem->gen >= 4)
return ROUND_UP_TO(pitch, tile_width);
 
/* The older hardware has a maximum pitch of 8192 with tiled
* surfaces, so fall back to untiled if it's too large.
*/
if (pitch > 8192) {
*tiling_mode = I915_TILING_NONE;
return ALIGN(pitch, 64);
}
 
/* Pre-965 needs power of two tile width */
for (i = tile_width; i < pitch; i <<= 1)
;
 
return i;
}
 
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
unsigned long size)
{
int i;
 
for (i = 0; i < bufmgr_gem->num_buckets; i++) {
struct drm_intel_gem_bo_bucket *bucket =
&bufmgr_gem->cache_bucket[i];
if (bucket->size >= size) {
return bucket;
}
}
 
return NULL;
}
 
static void
drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
int i, j;
 
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
if (bo_gem->relocs == NULL) {
DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
bo_gem->name);
continue;
}
 
for (j = 0; j < bo_gem->reloc_count; j++) {
drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
drm_intel_bo_gem *target_gem =
(drm_intel_bo_gem *) target_bo;
 
DBG("%2d: %d (%s)@0x%08llx -> "
"%d (%s)@0x%08lx + 0x%08x\n",
i,
bo_gem->gem_handle, bo_gem->name,
(unsigned long long)bo_gem->relocs[j].offset,
target_gem->gem_handle,
target_gem->name,
target_bo->offset,
bo_gem->relocs[j].delta);
}
}
}
 
static inline void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
atomic_inc(&bo_gem->refcount);
}
 
/**
* Adds the given buffer to the list of buffers to be validated (moved into the
* appropriate memory type) with the next batch submission.
*
* If a buffer is validated multiple times in a batch submission, it ends up
* with the intersection of the memory type flags and the union of the
* access flags.
*/
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int index;
 
if (bo_gem->validate_index != -1)
return;
 
/* Extend the array of validation entries as necessary. */
if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
int new_size = bufmgr_gem->exec_size * 2;
 
if (new_size == 0)
new_size = 5;
 
bufmgr_gem->exec_objects =
realloc(bufmgr_gem->exec_objects,
sizeof(*bufmgr_gem->exec_objects) * new_size);
bufmgr_gem->exec_bos =
realloc(bufmgr_gem->exec_bos,
sizeof(*bufmgr_gem->exec_bos) * new_size);
bufmgr_gem->exec_size = new_size;
}
 
index = bufmgr_gem->exec_count;
bo_gem->validate_index = index;
/* Fill in array entry */
bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
bufmgr_gem->exec_objects[index].alignment = 0;
bufmgr_gem->exec_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
bufmgr_gem->exec_count++;
}
 
static void
drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int index;
 
if (bo_gem->validate_index != -1) {
if (need_fence)
bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
EXEC_OBJECT_NEEDS_FENCE;
return;
}
 
/* Extend the array of validation entries as necessary. */
if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
int new_size = bufmgr_gem->exec_size * 2;
 
if (new_size == 0)
new_size = 5;
 
bufmgr_gem->exec2_objects =
realloc(bufmgr_gem->exec2_objects,
sizeof(*bufmgr_gem->exec2_objects) * new_size);
bufmgr_gem->exec_bos =
realloc(bufmgr_gem->exec_bos,
sizeof(*bufmgr_gem->exec_bos) * new_size);
bufmgr_gem->exec_size = new_size;
}
 
index = bufmgr_gem->exec_count;
bo_gem->validate_index = index;
/* Fill in array entry */
bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
bufmgr_gem->exec2_objects[index].alignment = 0;
bufmgr_gem->exec2_objects[index].offset = 0;
bufmgr_gem->exec_bos[index] = bo;
bufmgr_gem->exec2_objects[index].flags = 0;
bufmgr_gem->exec2_objects[index].rsvd1 = 0;
bufmgr_gem->exec2_objects[index].rsvd2 = 0;
if (need_fence) {
bufmgr_gem->exec2_objects[index].flags |=
EXEC_OBJECT_NEEDS_FENCE;
}
bufmgr_gem->exec_count++;
}
 
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
sizeof(uint32_t))
 
static void
drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
drm_intel_bo_gem *bo_gem)
{
int size;
 
assert(!bo_gem->used_as_reloc_target);
 
/* The older chipsets are far less flexible in terms of tiling,
* and require tiled buffers to be size-aligned in the aperture.
* This means that in the worst possible case we will need a hole
* twice as large as the object in order for it to fit into the
* aperture. Optimal packing is for wimps.
*/
size = bo_gem->bo.size;
if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
int min_size;
 
if (bufmgr_gem->has_relaxed_fencing) {
if (bufmgr_gem->gen == 3)
min_size = 1024*1024;
else
min_size = 512*1024;
 
while (min_size < size)
min_size *= 2;
} else
min_size = size;
 
/* Account for worst-case alignment. */
size = 2 * min_size;
}
 
bo_gem->reloc_tree_size = size;
}
 
static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
unsigned int max_relocs = bufmgr_gem->max_relocs;
 
if (bo->size / 4 < max_relocs)
max_relocs = bo->size / 4;
 
bo_gem->relocs = malloc(max_relocs *
sizeof(struct drm_i915_gem_relocation_entry));
bo_gem->reloc_target_info = malloc(max_relocs *
sizeof(drm_intel_reloc_target));
if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
bo_gem->has_error = true;
 
free (bo_gem->relocs);
bo_gem->relocs = NULL;
 
free (bo_gem->reloc_target_info);
bo_gem->reloc_target_info = NULL;
 
return 1;
}
 
return 0;
}
 
static int
drm_intel_gem_bo_busy(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_busy busy;
int ret;
 
VG_CLEAR(busy);
busy.handle = bo_gem->gem_handle;
 
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 
return (ret == 0 && busy.busy);
}
 
static int
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
drm_intel_bo_gem *bo_gem, int state)
{
struct drm_i915_gem_madvise madv;
 
VG_CLEAR(madv);
madv.handle = bo_gem->gem_handle;
madv.madv = state;
madv.retained = 1;
// drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 
return madv.retained;
}
 
static int
drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
{
return drm_intel_gem_bo_madvise_internal
((drm_intel_bufmgr_gem *) bo->bufmgr,
(drm_intel_bo_gem *) bo,
madv);
}
 
/* drop the oldest entries that have been purged by the kernel */
static void
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
struct drm_intel_gem_bo_bucket *bucket)
{
while (!DRMLISTEMPTY(&bucket->head)) {
drm_intel_bo_gem *bo_gem;
 
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
if (drm_intel_gem_bo_madvise_internal
(bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
break;
 
DRMLISTDEL(&bo_gem->head);
drm_intel_gem_bo_free(&bo_gem->bo);
}
}
 
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned long flags,
uint32_t tiling_mode,
unsigned long stride)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
drm_intel_bo_gem *bo_gem;
unsigned int page_size = 4096;
int ret;
struct drm_intel_gem_bo_bucket *bucket;
bool alloc_from_cache;
unsigned long bo_size;
bool for_render = false;
 
if (flags & BO_ALLOC_FOR_RENDER)
for_render = true;
 
/* Round the allocated size up to a power of two number of pages. */
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
 
/* If we don't have caching at this size, don't actually round the
* allocation up.
*/
if (bucket == NULL) {
bo_size = size;
if (bo_size < page_size)
bo_size = page_size;
} else {
bo_size = bucket->size;
}
 
// pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a buffer out of the cache if available */
retry:
alloc_from_cache = false;
if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
if (for_render) {
/* Allocate new render-target BOs from the tail (MRU)
* of the list, as it will likely be hot in the GPU
* cache and in the aperture for us.
*/
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.prev, head);
DRMLISTDEL(&bo_gem->head);
alloc_from_cache = true;
} else {
/* For non-render-target BOs (where we're probably
* going to map it first thing in order to fill it
* with data), check if the last BO in the cache is
* unbusy, and only reuse in that case. Otherwise,
* allocating a new buffer is probably faster than
* waiting for the GPU to finish.
*/
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
alloc_from_cache = true;
DRMLISTDEL(&bo_gem->head);
}
}
 
if (alloc_from_cache) {
if (!drm_intel_gem_bo_madvise_internal
(bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
drm_intel_gem_bo_free(&bo_gem->bo);
drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
bucket);
goto retry;
}
 
if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
tiling_mode,
stride)) {
drm_intel_gem_bo_free(&bo_gem->bo);
goto retry;
}
}
}
// pthread_mutex_unlock(&bufmgr_gem->lock);
 
if (!alloc_from_cache) {
struct drm_i915_gem_create create;
 
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
return NULL;
 
bo_gem->bo.size = bo_size;
 
VG_CLEAR(create);
create.size = bo_size;
 
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_CREATE,
&create);
bo_gem->gem_handle = create.handle;
bo_gem->bo.handle = bo_gem->gem_handle;
if (ret != 0) {
free(bo_gem);
return NULL;
}
bo_gem->bo.bufmgr = bufmgr;
 
bo_gem->tiling_mode = I915_TILING_NONE;
bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
bo_gem->stride = 0;
 
if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
tiling_mode,
stride)) {
drm_intel_gem_bo_free(&bo_gem->bo);
return NULL;
}
 
DRMINITLISTHEAD(&bo_gem->name_list);
DRMINITLISTHEAD(&bo_gem->vma_list);
}
 
bo_gem->name = name;
atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->reloc_tree_fences = 0;
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = true;
bo_gem->aub_annotations = NULL;
bo_gem->aub_annotation_count = 0;
 
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
DBG("bo_create: buf %d (%s) %ldb\n",
bo_gem->gem_handle, bo_gem->name, size);
 
return &bo_gem->bo;
}
 
static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
{
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
BO_ALLOC_FOR_RENDER,
I915_TILING_NONE, 0);
}
 
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned long size,
unsigned int alignment)
{
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
I915_TILING_NONE, 0);
}
 
static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
int x, int y, int cpp, uint32_t *tiling_mode,
unsigned long *pitch, unsigned long flags)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
unsigned long size, stride;
uint32_t tiling;
 
do {
unsigned long aligned_y, height_alignment;
 
tiling = *tiling_mode;
 
/* If we're tiled, our allocations are in 8 or 32-row blocks,
* so failure to align our height means that we won't allocate
* enough pages.
*
* If we're untiled, we still have to align to 2 rows high
* because the data port accesses 2x2 blocks even if the
* bottom row isn't to be rendered, so failure to align means
* we could walk off the end of the GTT and fault. This is
* documented on 965, and may be the case on older chipsets
* too so we try to be careful.
*/
aligned_y = y;
height_alignment = 2;
 
if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
height_alignment = 16;
else if (tiling == I915_TILING_X
|| (IS_915(bufmgr_gem->pci_device)
&& tiling == I915_TILING_Y))
height_alignment = 8;
else if (tiling == I915_TILING_Y)
height_alignment = 32;
aligned_y = ALIGN(y, height_alignment);
 
stride = x * cpp;
stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
size = stride * aligned_y;
size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
} while (*tiling_mode != tiling);
*pitch = stride;
 
if (tiling == I915_TILING_NONE)
stride = 0;
 
return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
tiling, stride);
}
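
/* Illustration (a hedged sketch, not part of the original source): the
 * do/while loop above re-runs the size computation because
 * drm_intel_gem_bo_tile_pitch()/_tile_size() may downgrade *tiling_mode
 * (e.g. to I915_TILING_NONE when the requested pitch is unsupported on
 * the hardware), and the height alignment depends on the mode that
 * finally sticks. A caller using the public wrapper would look like:
 *
 *	uint32_t tiling = I915_TILING_X;
 *	unsigned long pitch;
 *	drm_intel_bo *bo = drm_intel_bo_alloc_tiled(bufmgr, "surface",
 *						    1024, 768, 4,
 *						    &tiling, &pitch, 0);
 *	// on return, `tiling` holds the mode actually applied
 */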
 
/**
* Returns a drm_intel_bo wrapping the given buffer object handle.
*
* This can be used when one application needs to pass a buffer object
* to another.
*/
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
const char *name,
unsigned int handle)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
drm_intel_bo_gem *bo_gem;
int ret;
struct drm_gem_open open_arg;
struct drm_i915_gem_get_tiling get_tiling;
drmMMListHead *list;
 
	/* At the moment most applications only have a few named bos.
* For instance, in a DRI client only the render buffers passed
* between X and the client are named. And since X returns the
* alternating names for the front/back buffer a linear search
* provides a sufficiently fast match.
*/
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
list = list->next) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->global_name == handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
return &bo_gem->bo;
}
}
 
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
return NULL;
 
VG_CLEAR(open_arg);
open_arg.name = handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_GEM_OPEN,
&open_arg);
if (ret != 0) {
DBG("Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
free(bo_gem);
return NULL;
}
bo_gem->bo.size = open_arg.size;
bo_gem->bo.offset = 0;
bo_gem->bo.virtual = NULL;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->name = name;
atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->gem_handle = open_arg.handle;
bo_gem->bo.handle = open_arg.handle;
bo_gem->global_name = handle;
bo_gem->reusable = false;
 
VG_CLEAR(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_GET_TILING,
&get_tiling);
if (ret != 0) {
drm_intel_gem_bo_unreference(&bo_gem->bo);
return NULL;
}
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
 
return &bo_gem->bo;
}
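
/* Usage sketch (hedged; assumes two cooperating processes): the exporter
 * publishes a 32-bit flink name and the importer resolves it with the
 * function above:
 *
 *	uint32_t name;
 *	drm_intel_bo_flink(bo, &name);		// exporting process
 *	// ...pass `name` over IPC...
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */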
 
static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_gem_close close;
int ret;
 
DRMLISTDEL(&bo_gem->vma_list);
if (bo_gem->mem_virtual) {
VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
bufmgr_gem->vma_count--;
}
if (bo_gem->gtt_virtual) {
bufmgr_gem->vma_count--;
}
 
/* Close this object */
VG_CLEAR(close);
close.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
if (ret != 0) {
DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo_gem->gem_handle, bo_gem->name, strerror(errno));
}
free(bo_gem->aub_annotations);
free(bo);
}
 
static void
drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
{
#if HAVE_VALGRIND
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
if (bo_gem->mem_virtual)
VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
 
if (bo_gem->gtt_virtual)
VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
#endif
}
 
/** Frees all cached buffers significantly older than @time. */
static void
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
{
int i;
 
if (bufmgr_gem->time == time)
return;
 
for (i = 0; i < bufmgr_gem->num_buckets; i++) {
struct drm_intel_gem_bo_bucket *bucket =
&bufmgr_gem->cache_bucket[i];
 
while (!DRMLISTEMPTY(&bucket->head)) {
drm_intel_bo_gem *bo_gem;
 
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
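			/* The bucket list is kept in free-time order
			 * (oldest first), so stop at the first entry
			 * freed within the last second.
			 */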
if (time - bo_gem->free_time <= 1)
break;
 
DRMLISTDEL(&bo_gem->head);
 
drm_intel_gem_bo_free(&bo_gem->bo);
}
}
 
bufmgr_gem->time = time;
}
 
static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
{
int limit;
 
DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
 
if (bufmgr_gem->vma_max < 0)
return;
 
/* We may need to evict a few entries in order to create new mmaps */
limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
if (limit < 0)
limit = 0;
 
while (bufmgr_gem->vma_count > limit) {
drm_intel_bo_gem *bo_gem;
 
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bufmgr_gem->vma_cache.next,
vma_list);
assert(bo_gem->map_count == 0);
DRMLISTDELINIT(&bo_gem->vma_list);
 
if (bo_gem->mem_virtual) {
// munmap(bo_gem->mem_virtual, bo_gem->bo.size);
bo_gem->mem_virtual = NULL;
bufmgr_gem->vma_count--;
}
if (bo_gem->gtt_virtual) {
// munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
bo_gem->gtt_virtual = NULL;
bufmgr_gem->vma_count--;
}
}
}
 
static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
drm_intel_bo_gem *bo_gem)
{
bufmgr_gem->vma_open--;
DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
if (bo_gem->mem_virtual)
bufmgr_gem->vma_count++;
if (bo_gem->gtt_virtual)
bufmgr_gem->vma_count++;
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
 
static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
drm_intel_bo_gem *bo_gem)
{
bufmgr_gem->vma_open++;
DRMLISTDEL(&bo_gem->vma_list);
if (bo_gem->mem_virtual)
bufmgr_gem->vma_count--;
if (bo_gem->gtt_virtual)
bufmgr_gem->vma_count--;
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
 
static void
drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_intel_gem_bo_bucket *bucket;
int i;
 
/* Unreference all the target buffers */
for (i = 0; i < bo_gem->reloc_count; i++) {
if (bo_gem->reloc_target_info[i].bo != bo) {
drm_intel_gem_bo_unreference_locked_timed(bo_gem->
reloc_target_info[i].bo,
time);
}
}
bo_gem->reloc_count = 0;
bo_gem->used_as_reloc_target = false;
 
DBG("bo_unreference final: %d (%s)\n",
bo_gem->gem_handle, bo_gem->name);
 
/* release memory associated with this object */
if (bo_gem->reloc_target_info) {
free(bo_gem->reloc_target_info);
bo_gem->reloc_target_info = NULL;
}
if (bo_gem->relocs) {
free(bo_gem->relocs);
bo_gem->relocs = NULL;
}
 
/* Clear any left-over mappings */
if (bo_gem->map_count) {
DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
bo_gem->map_count = 0;
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
}
 
DRMLISTDEL(&bo_gem->name_list);
 
bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
/* Put the buffer into our internal cache for reuse if we can. */
if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
I915_MADV_DONTNEED)) {
bo_gem->free_time = time;
 
bo_gem->name = NULL;
bo_gem->validate_index = -1;
 
DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
} else {
drm_intel_gem_bo_free(bo);
}
}
 
static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
time_t time)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
assert(atomic_read(&bo_gem->refcount) > 0);
if (atomic_dec_and_test(&bo_gem->refcount))
drm_intel_gem_bo_unreference_final(bo, time);
}
 
static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
assert(atomic_read(&bo_gem->refcount) > 0);
if (atomic_dec_and_test(&bo_gem->refcount)) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
// struct timespec time;
 
// clock_gettime(CLOCK_MONOTONIC, &time);
 
// pthread_mutex_lock(&bufmgr_gem->lock);
drm_intel_gem_bo_unreference_final(bo, 0);
drm_intel_gem_cleanup_bo_cache(bufmgr_gem, 0);
// pthread_mutex_unlock(&bufmgr_gem->lock);
}
}
 
static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_domain set_domain;
int ret;
 
// pthread_mutex_lock(&bufmgr_gem->lock);
 
if (bo_gem->map_count++ == 0)
drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
 
if (!bo_gem->mem_virtual) {
struct drm_i915_gem_mmap mmap_arg;
 
DBG("bo_map: %d (%s), map_count=%d\n",
bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
VG_CLEAR(mmap_arg);
mmap_arg.handle = bo_gem->gem_handle;
mmap_arg.offset = 0;
mmap_arg.size = bo->size;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_MMAP,
&mmap_arg);
if (ret != 0) {
ret = -errno;
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle,
bo_gem->name, strerror(errno));
if (--bo_gem->map_count == 0)
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
// pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
}
DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->mem_virtual);
bo->virtual = bo_gem->mem_virtual;
 
VG_CLEAR(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_CPU;
if (write_enable)
set_domain.write_domain = I915_GEM_DOMAIN_CPU;
else
set_domain.write_domain = 0;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
if (ret != 0) {
DBG("%s:%d: Error setting to CPU domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
strerror(errno));
}
 
if (write_enable)
bo_gem->mapped_cpu_write = true;
 
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
// pthread_mutex_unlock(&bufmgr_gem->lock);
 
return 0;
}
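
/* Minimal usage sketch (hedged; the public entry point wrapping the
 * function above is drm_intel_bo_map()): map for write, fill, then unmap
 * so the vma accounting in this file stays balanced:
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {	// 1 = write_enable
 *		memset(bo->virtual, 0, bo->size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */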
 
static int
map_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
 
if (bo_gem->map_count++ == 0)
drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
 
/* Get a mapping of the buffer if we haven't before. */
if (bo_gem->gtt_virtual == NULL) {
struct drm_i915_gem_mmap_gtt mmap_arg;
 
DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
 
VG_CLEAR(mmap_arg);
mmap_arg.handle = bo_gem->gem_handle;
mmap_arg.offset = 0;
 
/* Get the fake offset back... */
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_MMAP_GTT,
&mmap_arg);
if (ret != 0) {
ret = -errno;
DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
__FILE__, __LINE__,
bo_gem->gem_handle, bo_gem->name,
strerror(errno));
if (--bo_gem->map_count == 0)
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
return ret;
}
 
		/* No separate mmap() call here: in this port the
		 * MMAP_GTT ioctl returns the mapped address directly
		 * in mmap_arg.offset.
		 */
		bo_gem->gtt_virtual = (void *)(uintptr_t) mmap_arg.offset;
if (bo_gem->gtt_virtual == 0) {
bo_gem->gtt_virtual = NULL;
ret = -errno;
DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
__FILE__, __LINE__,
bo_gem->gem_handle, bo_gem->name,
strerror(errno));
if (--bo_gem->map_count == 0)
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
return ret;
}
}
 
bo->virtual = bo_gem->gtt_virtual;
 
DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
bo_gem->gtt_virtual);
 
return 0;
}
 
int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_domain set_domain;
int ret;
 
// pthread_mutex_lock(&bufmgr_gem->lock);
 
ret = map_gtt(bo);
if (ret) {
// pthread_mutex_unlock(&bufmgr_gem->lock);
return ret;
}
 
/* Now move it to the GTT domain so that the GPU and CPU
* caches are flushed and the GPU isn't actively using the
* buffer.
*
* The pagefault handler does this domain change for us when
* it has unbound the BO from the GTT, but it's up to us to
* tell it when we're about to use things if we had done
* rendering and it still happens to be bound to the GTT.
*/
VG_CLEAR(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = I915_GEM_DOMAIN_GTT;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
if (ret != 0) {
DBG("%s:%d: Error setting domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
strerror(errno));
}
 
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
// pthread_mutex_unlock(&bufmgr_gem->lock);
 
return 0;
}
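
/* Note on choosing a mapping: the CPU mapping above (drm_intel_gem_bo_map)
 * gives direct, cacheable access and suits plain CPU fills, while this
 * GTT mapping goes through the aperture -- writes are write-combined,
 * reads are slow, and the hardware detiles the view, which is what you
 * want when software must touch a tiled buffer without decoding the
 * tiling layout itself.
 */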
 
/**
* Performs a mapping of the buffer object like the normal GTT
* mapping, but avoids waiting for the GPU to be done reading from or
* rendering to the buffer.
*
* This is used in the implementation of GL_ARB_map_buffer_range: The
* user asks to create a buffer, then does a mapping, fills some
* space, runs a drawing command, then asks to map it again without
* synchronizing because it guarantees that it won't write over the
* data that the GPU is busy using (or, more specifically, that if it
* does write over the data, it acknowledges that rendering is
* undefined).
*/
 
int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
int ret;
 
/* If the CPU cache isn't coherent with the GTT, then use a
* regular synchronized mapping. The problem is that we don't
* track where the buffer was last used on the CPU side in
* terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
* we would potentially corrupt the buffer even when the user
* does reasonable things.
*/
if (!bufmgr_gem->has_llc)
return drm_intel_gem_bo_map_gtt(bo);
 
// pthread_mutex_lock(&bufmgr_gem->lock);
ret = map_gtt(bo);
// pthread_mutex_unlock(&bufmgr_gem->lock);
 
return ret;
}
 
static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
	drm_intel_bufmgr_gem *bufmgr_gem;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	int ret = 0;

	if (bo == NULL)
		return 0;

	bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
 
// pthread_mutex_lock(&bufmgr_gem->lock);
 
if (bo_gem->map_count <= 0) {
DBG("attempted to unmap an unmapped bo\n");
// pthread_mutex_unlock(&bufmgr_gem->lock);
/* Preserve the old behaviour of just treating this as a
* no-op rather than reporting the error.
*/
return 0;
}
 
	if (bo_gem->mapped_cpu_write) {
		/* Cause a flush to happen if the buffer's pinned for
		 * scanout, so the results show up in a timely manner.
		 * Unlike GTT set domains, this only does work if the
		 * buffer should be scanout-related.
		 *
		 * Upstream libdrm issues DRM_IOCTL_I915_GEM_SW_FINISH
		 * here; this port omits that ioctl, so no local
		 * drm_i915_gem_sw_finish struct is needed.
		 */
		bo_gem->mapped_cpu_write = false;
	}
 
	/* We need to unmap after every invocation as we cannot track
	 * an open vma for every bo as that will exhaust the system
	 * limits and cause later failures.
	 */
if (--bo_gem->map_count == 0) {
drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
bo->virtual = NULL;
}
// pthread_mutex_unlock(&bufmgr_gem->lock);
 
return ret;
}
 
int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
{
return drm_intel_gem_bo_unmap(bo);
}
 
static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_pwrite pwrite;
int ret;
 
VG_CLEAR(pwrite);
pwrite.handle = bo_gem->gem_handle;
pwrite.offset = offset;
pwrite.size = size;
pwrite.data_ptr = (uint64_t) (uintptr_t) data;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PWRITE,
&pwrite);
if (ret != 0) {
ret = -errno;
DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
}
 
return ret;
}
 
#if 0
static int
drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
int ret;
 
VG_CLEAR(get_pipe_from_crtc_id);
get_pipe_from_crtc_id.crtc_id = crtc_id;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
&get_pipe_from_crtc_id);
if (ret != 0) {
/* We return -1 here to signal that we don't
* know which pipe is associated with this crtc.
* This lets the caller know that this information
* isn't available; using the wrong pipe for
* vblank waiting can cause the chipset to lock up
*/
return -1;
}
 
return get_pipe_from_crtc_id.pipe;
}
 
static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
unsigned long size, void *data)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_pread pread;
int ret;
 
VG_CLEAR(pread);
pread.handle = bo_gem->gem_handle;
pread.offset = offset;
pread.size = size;
pread.data_ptr = (uint64_t) (uintptr_t) data;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PREAD,
&pread);
if (ret != 0) {
ret = -errno;
DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
(int)size, strerror(errno));
}
 
return ret;
}
 
#endif
 
/** Waits for all GPU rendering with the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
drm_intel_gem_bo_start_gtt_access(bo, 1);
}
 
/**
* Waits on a BO for the given amount of time.
*
* @bo: buffer object to wait for
* @timeout_ns: amount of time to wait in nanoseconds.
* If value is less than 0, an infinite wait will occur.
*
 * Returns 0 if the wait was successful, i.e. the last batch referencing the
 * object has completed within the allotted time. Otherwise some negative return
 * value describes the error. Of particular interest is -ETIME when the wait has
 * failed to yield the desired result.
 *
 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
 * the operation to give up after a certain amount of time. Another subtle
 * difference is that the internal locking semantics differ (this variant does
 * not hold the lock for the duration of the wait), which makes the wait subject
 * to a larger userspace race window.
 *
 * The implementation shall wait until the object is no longer actively
 * referenced within a batch buffer at the time of the call. The wait will
 * not guarantee that the buffer is not re-issued via another thread, or a
 * flinked handle. Userspace must make sure this race does not occur if such
 * precision is important.
*/
int drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_wait wait;
int ret;
 
if (!bufmgr_gem->has_wait_timeout) {
DBG("%s:%d: Timed wait is not supported. Falling back to "
"infinite wait\n", __FILE__, __LINE__);
if (timeout_ns) {
drm_intel_gem_bo_wait_rendering(bo);
return 0;
} else {
			return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
}
}
 
wait.bo_handle = bo_gem->gem_handle;
wait.timeout_ns = timeout_ns;
wait.flags = 0;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
if (ret == -1)
return -errno;
 
return ret;
}
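
/* Usage sketch (hedged): wait up to 100 ms for the last batch touching
 * `bo` to retire, treating -ETIME as "still busy":
 *
 *	int ret = drm_intel_gem_bo_wait(bo, 100 * 1000 * 1000LL);
 *	if (ret == -ETIME) {
 *		// still busy: do other work and retry later
 *	}
 */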
 
/**
* Sets the object to the GTT read and possibly write domain, used by the X
* 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
*
* In combination with drm_intel_gem_bo_pin() and manual fence management, we
* can do tiled pixmaps this way.
*/
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_domain set_domain;
int ret;
 
VG_CLEAR(set_domain);
set_domain.handle = bo_gem->gem_handle;
set_domain.read_domains = I915_GEM_DOMAIN_GTT;
set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
if (ret != 0) {
DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle,
set_domain.read_domains, set_domain.write_domain,
strerror(errno));
}
}
 
static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
int i;
 
free(bufmgr_gem->exec2_objects);
free(bufmgr_gem->exec_objects);
free(bufmgr_gem->exec_bos);
free(bufmgr_gem->aub_filename);
 
// pthread_mutex_destroy(&bufmgr_gem->lock);
 
/* Free any cached buffer objects we were going to reuse */
for (i = 0; i < bufmgr_gem->num_buckets; i++) {
struct drm_intel_gem_bo_bucket *bucket =
&bufmgr_gem->cache_bucket[i];
drm_intel_bo_gem *bo_gem;
 
while (!DRMLISTEMPTY(&bucket->head)) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
bucket->head.next, head);
DRMLISTDEL(&bo_gem->head);
 
drm_intel_gem_bo_free(&bo_gem->bo);
}
}
 
free(bufmgr);
}
 
/**
* Adds the target buffer to the validation list and adds the relocation
* to the reloc_buffer's relocation list.
*
* The relocation entry at the given offset must already contain the
* precomputed relocation value, because the kernel will optimize out
* the relocation entry write when the buffer hasn't moved from the
* last known offset in target_bo.
*/
static int
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain,
bool need_fence)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
bool fenced_command;
 
if (bo_gem->has_error)
return -ENOMEM;
 
if (target_bo_gem->has_error) {
bo_gem->has_error = true;
return -ENOMEM;
}
 
/* We never use HW fences for rendering on 965+ */
if (bufmgr_gem->gen >= 4)
need_fence = false;
 
fenced_command = need_fence;
if (target_bo_gem->tiling_mode == I915_TILING_NONE)
need_fence = false;
 
/* Create a new relocation list if needed */
if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
return -ENOMEM;
 
/* Check overflow */
assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
 
/* Check args */
assert(offset <= bo->size - 4);
assert((write_domain & (write_domain - 1)) == 0);
 
/* Make sure that we're not adding a reloc to something whose size has
* already been accounted for.
*/
assert(!bo_gem->used_as_reloc_target);
if (target_bo_gem != bo_gem) {
target_bo_gem->used_as_reloc_target = true;
bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
}
/* An object needing a fence is a tiled buffer, so it won't have
* relocs to other buffers.
*/
if (need_fence)
target_bo_gem->reloc_tree_fences = 1;
bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
 
bo_gem->relocs[bo_gem->reloc_count].offset = offset;
bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
bo_gem->relocs[bo_gem->reloc_count].target_handle =
target_bo_gem->gem_handle;
bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
 
bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
if (target_bo != bo)
drm_intel_gem_bo_reference(target_bo);
if (fenced_command)
bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
DRM_INTEL_RELOC_FENCE;
else
bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
 
bo_gem->reloc_count++;
 
return 0;
}
 
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
 
return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
read_domains, write_domain,
!bufmgr_gem->fenced_relocs);
}
 
static int
drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo,
uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain)
{
return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
read_domains, write_domain, true);
}
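
/* Illustration (hedged): a driver that writes target_bo's presumed GTT
 * offset into the batch at byte offset `cmd_offset` records the matching
 * relocation so the kernel can patch it if the target moves:
 *
 *	drm_intel_bo_emit_reloc(batch_bo, cmd_offset,
 *				target_bo, 0,
 *				I915_GEM_DOMAIN_RENDER,
 *				I915_GEM_DOMAIN_RENDER);
 */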
 
int
drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
return bo_gem->reloc_count;
}
 
/**
* Removes existing relocation entries in the BO after "start".
*
* This allows a user to avoid a two-step process for state setup with
* counting up all the buffer objects and doing a
* drm_intel_bufmgr_check_aperture_space() before emitting any of the
* relocations for the state setup. Instead, save the state of the
* batchbuffer including drm_intel_gem_get_reloc_count(), emit all the
* state, and then check if it still fits in the aperture.
*
* Any further drm_intel_bufmgr_check_aperture_space() queries
* involving this buffer in the tree are undefined after this call.
*/
void
drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
// struct timespec time;
 
// clock_gettime(CLOCK_MONOTONIC, &time);
 
assert(bo_gem->reloc_count >= start);
/* Unreference the cleared target buffers */
for (i = start; i < bo_gem->reloc_count; i++) {
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
if (&target_bo_gem->bo != bo) {
bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
0);
}
}
bo_gem->reloc_count = start;
}
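
/* Sketch of the two-step pattern described above (hedged):
 *
 *	int start = drm_intel_gem_bo_get_reloc_count(batch);
 *	// ...emit state and its relocations...
 *	if (drm_intel_bufmgr_check_aperture_space(&batch, 1) != 0) {
 *		drm_intel_gem_bo_clear_relocs(batch, start);
 *		// flush the batch and re-emit the state
 *	}
 */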
 
/**
* Walk the tree of relocations rooted at BO and accumulate the list of
* validations to be performed and update the relocation buffers with
* index values into the validation list.
*/
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
 
if (bo_gem->relocs == NULL)
return;
 
for (i = 0; i < bo_gem->reloc_count; i++) {
drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
 
if (target_bo == bo)
continue;
 
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
 
/* Continue walking the tree depth-first. */
drm_intel_gem_bo_process_reloc(target_bo);
 
/* Add the target to the validate list */
drm_intel_add_validate_buffer(target_bo);
}
}
 
static void
drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
int i;
 
if (bo_gem->relocs == NULL)
return;
 
for (i = 0; i < bo_gem->reloc_count; i++) {
drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
int need_fence;
 
if (target_bo == bo)
continue;
 
drm_intel_gem_bo_mark_mmaps_incoherent(bo);
 
/* Continue walking the tree depth-first. */
drm_intel_gem_bo_process_reloc2(target_bo);
 
need_fence = (bo_gem->reloc_target_info[i].flags &
DRM_INTEL_RELOC_FENCE);
 
/* Add the target to the validate list */
drm_intel_add_validate_buffer2(target_bo, need_fence);
}
}
 
 
static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
int i;
 
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
/* Update the buffer offset */
if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset,
(unsigned long long)bufmgr_gem->exec_objects[i].
offset);
bo->offset = bufmgr_gem->exec_objects[i].offset;
}
}
}
 
static void
drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
{
int i;
 
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
 
/* Update the buffer offset */
if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset,
(unsigned long long)bufmgr_gem->exec2_objects[i].offset);
bo->offset = bufmgr_gem->exec2_objects[i].offset;
}
}
}
 
static void
aub_out(drm_intel_bufmgr_gem *bufmgr_gem, uint32_t data)
{
fwrite(&data, 1, 4, bufmgr_gem->aub_file);
}
 
static void
aub_out_data(drm_intel_bufmgr_gem *bufmgr_gem, void *data, size_t size)
{
fwrite(data, 1, size, bufmgr_gem->aub_file);
}
 
static void
aub_write_bo_data(drm_intel_bo *bo, uint32_t offset, uint32_t size)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
uint32_t *data;
unsigned int i;
 
	data = malloc(bo->size);
	if (data == NULL)
		return;
	drm_intel_bo_get_subdata(bo, offset, size, data);
 
/* Easy mode: write out bo with no relocations */
if (!bo_gem->reloc_count) {
aub_out_data(bufmgr_gem, data, size);
free(data);
return;
}
 
/* Otherwise, handle the relocations while writing. */
for (i = 0; i < size / 4; i++) {
int r;
for (r = 0; r < bo_gem->reloc_count; r++) {
struct drm_i915_gem_relocation_entry *reloc;
drm_intel_reloc_target *info;
 
reloc = &bo_gem->relocs[r];
info = &bo_gem->reloc_target_info[r];
 
if (reloc->offset == offset + i * 4) {
drm_intel_bo_gem *target_gem;
uint32_t val;
 
target_gem = (drm_intel_bo_gem *)info->bo;
 
val = reloc->delta;
val += target_gem->aub_offset;
 
aub_out(bufmgr_gem, val);
data[i] = val;
break;
}
}
if (r == bo_gem->reloc_count) {
/* no relocation, just the data */
aub_out(bufmgr_gem, data[i]);
}
}
 
free(data);
}
 
static void
aub_bo_get_address(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
/* Give the object a graphics address in the AUB file. We
* don't just use the GEM object address because we do AUB
* dumping before execution -- we want to successfully log
* when the hardware might hang, and we might even want to aub
* capture for a driver trying to execute on a different
* generation of hardware by disabling the actual kernel exec
* call.
*/
bo_gem->aub_offset = bufmgr_gem->aub_offset;
bufmgr_gem->aub_offset += bo->size;
/* XXX: Handle aperture overflow. */
assert(bufmgr_gem->aub_offset < 256 * 1024 * 1024);
}
 
static void
aub_write_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
uint32_t offset, uint32_t size)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
aub_out(bufmgr_gem,
CMD_AUB_TRACE_HEADER_BLOCK |
((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
aub_out(bufmgr_gem,
AUB_TRACE_MEMTYPE_GTT | type | AUB_TRACE_OP_DATA_WRITE);
aub_out(bufmgr_gem, subtype);
aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
aub_out(bufmgr_gem, size);
if (bufmgr_gem->gen >= 8)
aub_out(bufmgr_gem, 0);
aub_write_bo_data(bo, offset, size);
}
 
/**
 * Break up large objects into multiple writes. Otherwise a 128kb VBO
 * would overflow the 16-bit size field in the packet header and
* everything goes badly after that.
*/
static void
aub_write_large_trace_block(drm_intel_bo *bo, uint32_t type, uint32_t subtype,
uint32_t offset, uint32_t size)
{
uint32_t block_size;
uint32_t sub_offset;
 
for (sub_offset = 0; sub_offset < size; sub_offset += block_size) {
block_size = size - sub_offset;
 
if (block_size > 8 * 4096)
block_size = 8 * 4096;
 
aub_write_trace_block(bo, type, subtype, offset + sub_offset,
block_size);
}
}
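
/* Worked example: a 128kb (131072 byte) VBO is emitted as four blocks of
 * 8 * 4096 = 32768 bytes each, keeping every block size well inside the
 * 16-bit limit of 65535.
 */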
 
static void
aub_write_bo(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
uint32_t offset = 0;
unsigned i;
 
aub_bo_get_address(bo);
 
/* Write out each annotated section separately. */
for (i = 0; i < bo_gem->aub_annotation_count; ++i) {
drm_intel_aub_annotation *annotation =
&bo_gem->aub_annotations[i];
uint32_t ending_offset = annotation->ending_offset;
if (ending_offset > bo->size)
ending_offset = bo->size;
if (ending_offset > offset) {
aub_write_large_trace_block(bo, annotation->type,
annotation->subtype,
offset,
ending_offset - offset);
offset = ending_offset;
}
}
 
/* Write out any remaining unannotated data */
if (offset < bo->size) {
aub_write_large_trace_block(bo, AUB_TRACE_TYPE_NOTYPE, 0,
offset, bo->size - offset);
}
}
 
/*
 * Make a ring buffer on the fly and dump it
*/
static void
aub_build_dump_ringbuffer(drm_intel_bufmgr_gem *bufmgr_gem,
uint32_t batch_buffer, int ring_flag)
{
uint32_t ringbuffer[4096];
int ring = AUB_TRACE_TYPE_RING_PRB0; /* The default ring */
int ring_count = 0;
 
if (ring_flag == I915_EXEC_BSD)
ring = AUB_TRACE_TYPE_RING_PRB1;
else if (ring_flag == I915_EXEC_BLT)
ring = AUB_TRACE_TYPE_RING_PRB2;
 
/* Make a ring buffer to execute our batchbuffer. */
memset(ringbuffer, 0, sizeof(ringbuffer));
if (bufmgr_gem->gen >= 8) {
ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START | (3 - 2);
ringbuffer[ring_count++] = batch_buffer;
ringbuffer[ring_count++] = 0;
} else {
ringbuffer[ring_count++] = AUB_MI_BATCH_BUFFER_START;
ringbuffer[ring_count++] = batch_buffer;
}
 
/* Write out the ring. This appears to trigger execution of
* the ring in the simulator.
*/
aub_out(bufmgr_gem,
CMD_AUB_TRACE_HEADER_BLOCK |
((bufmgr_gem->gen >= 8 ? 6 : 5) - 2));
aub_out(bufmgr_gem,
AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
aub_out(bufmgr_gem, 0); /* general/surface subtype */
aub_out(bufmgr_gem, bufmgr_gem->aub_offset);
aub_out(bufmgr_gem, ring_count * 4);
if (bufmgr_gem->gen >= 8)
aub_out(bufmgr_gem, 0);
 
/* FIXME: Need some flush operations here? */
aub_out_data(bufmgr_gem, ringbuffer, ring_count * 4);
 
/* Update offset pointer */
bufmgr_gem->aub_offset += 4096;
}
 
void
drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
int x1, int y1, int width, int height,
enum aub_dump_bmp_format format,
int pitch, int offset)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
uint32_t cpp;
 
switch (format) {
case AUB_DUMP_BMP_FORMAT_8BIT:
cpp = 1;
break;
case AUB_DUMP_BMP_FORMAT_ARGB_4444:
cpp = 2;
break;
case AUB_DUMP_BMP_FORMAT_ARGB_0888:
case AUB_DUMP_BMP_FORMAT_ARGB_8888:
cpp = 4;
break;
default:
printf("Unknown AUB dump format %d\n", format);
return;
}
 
if (!bufmgr_gem->aub_file)
return;
 
aub_out(bufmgr_gem, CMD_AUB_DUMP_BMP | 4);
aub_out(bufmgr_gem, (y1 << 16) | x1);
aub_out(bufmgr_gem,
(format << 24) |
(cpp << 19) |
pitch / 4);
aub_out(bufmgr_gem, (height << 16) | width);
aub_out(bufmgr_gem, bo_gem->aub_offset + offset);
aub_out(bufmgr_gem,
((bo_gem->tiling_mode != I915_TILING_NONE) ? (1 << 2) : 0) |
((bo_gem->tiling_mode == I915_TILING_Y) ? (1 << 3) : 0));
}
 
static void
aub_exec(drm_intel_bo *bo, int ring_flag, int used)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
bool batch_buffer_needs_annotations;
 
if (!bufmgr_gem->aub_file)
return;
 
/* If batch buffer is not annotated, annotate it the best we
* can.
*/
batch_buffer_needs_annotations = bo_gem->aub_annotation_count == 0;
if (batch_buffer_needs_annotations) {
drm_intel_aub_annotation annotations[2] = {
{ AUB_TRACE_TYPE_BATCH, 0, used },
{ AUB_TRACE_TYPE_NOTYPE, 0, bo->size }
};
drm_intel_bufmgr_gem_set_aub_annotations(bo, annotations, 2);
}
 
/* Write out all buffers to AUB memory */
for (i = 0; i < bufmgr_gem->exec_count; i++) {
aub_write_bo(bufmgr_gem->exec_bos[i]);
}
 
/* Remove any annotations we added */
if (batch_buffer_needs_annotations)
drm_intel_bufmgr_gem_set_aub_annotations(bo, NULL, 0);
 
/* Dump ring buffer */
aub_build_dump_ringbuffer(bufmgr_gem, bo_gem->aub_offset, ring_flag);
 
fflush(bufmgr_gem->aub_file);
 
/*
* One frame has been dumped. So reset the aub_offset for the next frame.
*
* FIXME: Can we do this?
*/
bufmgr_gem->aub_offset = 0x10000;
}
 
 
static int
do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int flags)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
struct drm_i915_gem_execbuffer2 execbuf;
int ret = 0;
int i;
 
switch (flags & 0x7) {
default:
return -EINVAL;
case I915_EXEC_BLT:
if (!bufmgr_gem->has_blt)
return -EINVAL;
break;
case I915_EXEC_BSD:
if (!bufmgr_gem->has_bsd)
return -EINVAL;
break;
case I915_EXEC_VEBOX:
if (!bufmgr_gem->has_vebox)
return -EINVAL;
break;
case I915_EXEC_RENDER:
case I915_EXEC_DEFAULT:
break;
}
 
// pthread_mutex_lock(&bufmgr_gem->lock);
/* Update indices and set up the validate list. */
drm_intel_gem_bo_process_reloc2(bo);
 
/* Add the batch buffer to the validation list. There are no relocations
* pointing to it.
*/
drm_intel_add_validate_buffer2(bo, 0);
 
VG_CLEAR(execbuf);
execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
execbuf.buffer_count = bufmgr_gem->exec_count;
execbuf.batch_start_offset = 0;
execbuf.batch_len = used;
execbuf.cliprects_ptr = (uintptr_t)cliprects;
execbuf.num_cliprects = num_cliprects;
execbuf.DR1 = 0;
execbuf.DR4 = DR4;
execbuf.flags = flags;
if (ctx == NULL)
i915_execbuffer2_set_context_id(execbuf, 0);
else
i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
execbuf.rsvd2 = 0;
 
aub_exec(bo, flags, used);
 
if (bufmgr_gem->no_exec)
goto skip_execution;
 
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_EXECBUFFER2,
&execbuf);
if (ret != 0) {
ret = -errno;
if (ret == -ENOSPC) {
DBG("Execbuffer fails to pin. "
"Estimate: %u. Actual: %u. Available: %u\n",
drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
bufmgr_gem->exec_count),
drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
bufmgr_gem->exec_count),
(unsigned int) bufmgr_gem->gtt_size);
}
}
drm_intel_update_buffer_offsets2(bufmgr_gem);
 
skip_execution:
if (bufmgr_gem->bufmgr.debug)
drm_intel_gem_dump_validation_list(bufmgr_gem);
 
for (i = 0; i < bufmgr_gem->exec_count; i++) {
drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
 
/* Disconnect the buffer from the validate list */
bo_gem->validate_index = -1;
bufmgr_gem->exec_bos[i] = NULL;
}
bufmgr_gem->exec_count = 0;
// pthread_mutex_unlock(&bufmgr_gem->lock);
 
return ret;
}
 
static int
drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4)
{
return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
I915_EXEC_RENDER);
}
 
static int
drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
unsigned int flags)
{
return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
flags);
}
 
int
drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
int used, unsigned int flags)
{
return do_exec2(bo, used, ctx, NULL, 0, 0, flags);
}
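
/* Typical submission through these wrappers (hedged sketch): once the
 * batch is filled and its relocations emitted, a render-ring submission
 * without cliprects is simply:
 *
 *	drm_intel_bo_mrb_exec(batch_bo, used_bytes, NULL, 0, 0,
 *			      I915_EXEC_RENDER);
 */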
 
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_pin pin;
int ret;
 
VG_CLEAR(pin);
pin.handle = bo_gem->gem_handle;
pin.alignment = alignment;
 
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PIN,
&pin);
if (ret != 0)
return -errno;
 
bo->offset = pin.offset;
return 0;
}
 
static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_unpin unpin;
int ret;
 
VG_CLEAR(unpin);
unpin.handle = bo_gem->gem_handle;
 
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
if (ret != 0)
return -errno;
 
return 0;
}
 
static int
drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
uint32_t tiling_mode,
uint32_t stride)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
struct drm_i915_gem_set_tiling set_tiling;
int ret;
 
if (bo_gem->global_name == 0 &&
tiling_mode == bo_gem->tiling_mode &&
stride == bo_gem->stride)
return 0;
 
memset(&set_tiling, 0, sizeof(set_tiling));
// do {
/* set_tiling is slightly broken and overwrites the
* input on the error path, so we have to open code
	 * drmIoctl.
*/
set_tiling.handle = bo_gem->gem_handle;
set_tiling.tiling_mode = tiling_mode;
set_tiling.stride = stride;
 
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_TILING,
&set_tiling);
// } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
if (ret == -1)
return -errno;
 
bo_gem->tiling_mode = set_tiling.tiling_mode;
bo_gem->swizzle_mode = set_tiling.swizzle_mode;
bo_gem->stride = set_tiling.stride;
return 0;
}
 
static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
 
/* Linear buffers have no stride. By ensuring that we only ever use
* stride 0 with linear buffers, we simplify our code.
*/
if (*tiling_mode == I915_TILING_NONE)
stride = 0;
 
ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
if (ret == 0)
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
*tiling_mode = bo_gem->tiling_mode;
return ret;
}
 
static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
*tiling_mode = bo_gem->tiling_mode;
*swizzle_mode = bo_gem->swizzle_mode;
return 0;
}
 
#if 0
drm_intel_bo *
drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
int ret;
uint32_t handle;
drm_intel_bo_gem *bo_gem;
struct drm_i915_gem_get_tiling get_tiling;
drmMMListHead *list;
 
ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
 
/*
* See if the kernel has already returned this buffer to us. Just as
 * for named buffers, we must not create two bos pointing at the same
 * kernel object.
*/
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
list = list->next) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->gem_handle == handle) {
drm_intel_gem_bo_reference(&bo_gem->bo);
return &bo_gem->bo;
}
}
 
if (ret) {
fprintf(stderr,"ret is %d %d\n", ret, errno);
return NULL;
}
 
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
return NULL;
 
/* Determine size of bo. The fd-to-handle ioctl really should
* return the size, but it doesn't. If we have kernel 3.12 or
* later, we can lseek on the prime fd to get the size. Older
* kernels will just fail, in which case we fall back to the
	 * provided (estimated or guessed) size. */
ret = lseek(prime_fd, 0, SEEK_END);
if (ret != -1)
bo_gem->bo.size = ret;
else
bo_gem->bo.size = size;
 
bo_gem->bo.handle = handle;
bo_gem->bo.bufmgr = bufmgr;
 
bo_gem->gem_handle = handle;
 
atomic_set(&bo_gem->refcount, 1);
 
bo_gem->name = "prime";
bo_gem->validate_index = -1;
bo_gem->reloc_tree_fences = 0;
bo_gem->used_as_reloc_target = false;
bo_gem->has_error = false;
bo_gem->reusable = false;
 
DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 
VG_CLEAR(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_GET_TILING,
&get_tiling);
if (ret != 0) {
drm_intel_gem_bo_unreference(&bo_gem->bo);
return NULL;
}
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
return &bo_gem->bo;
}
 
int
drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
if (DRMLISTEMPTY(&bo_gem->name_list))
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
 
if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
DRM_CLOEXEC, prime_fd) != 0)
return -errno;
 
bo_gem->reusable = false;
 
return 0;
}
#endif
 
static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret;
 
if (!bo_gem->global_name) {
struct drm_gem_flink flink;
 
VG_CLEAR(flink);
flink.handle = bo_gem->gem_handle;
 
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
if (ret != 0)
return -errno;
 
bo_gem->global_name = flink.name;
bo_gem->reusable = false;
 
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
}
 
*name = bo_gem->global_name;
return 0;
}
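
/* Note: flinking marks the bo non-reusable above, since another process
 * may keep the name alive after the local reference count drops to zero;
 * returning such a buffer to the reuse cache could hand shared memory to
 * an unrelated allocation.
 */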
 
/**
* Enables unlimited caching of buffer objects for reuse.
*
* This is potentially very memory expensive, as the cache at each bucket
* size is only bounded by how many buffers of that size we've managed to have
* in flight at once.
*/
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
 
bufmgr_gem->bo_reuse = true;
}
 
/**
* Enable use of fenced reloc type.
*
* New code should enable this to avoid unnecessary fence register
 * allocation. If this option is not enabled, all relocs will have a fence
 * register allocated.
*/
void
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
 
if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
bufmgr_gem->fenced_relocs = true;
}
 
/**
* Return the additional aperture space required by the tree of buffer objects
* rooted at bo.
*/
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
int total = 0;
 
if (bo == NULL || bo_gem->included_in_check_aperture)
return 0;
 
total += bo->size;
bo_gem->included_in_check_aperture = true;
 
for (i = 0; i < bo_gem->reloc_count; i++)
total +=
drm_intel_gem_bo_get_aperture_space(bo_gem->
reloc_target_info[i].bo);
 
return total;
}
 
/**
* Count the number of buffers in this list that need a fence reg
*
* If the count is greater than the number of available regs, we'll have
* to ask the caller to resubmit a batch with fewer tiled buffers.
*
* This function over-counts if the same buffer is used multiple times.
*/
static unsigned int
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
{
int i;
unsigned int total = 0;
 
for (i = 0; i < count; i++) {
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
 
if (bo_gem == NULL)
continue;
 
total += bo_gem->reloc_tree_fences;
}
return total;
}
 
/**
* Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
* for the next drm_intel_bufmgr_check_aperture_space() call.
*/
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
 
if (bo == NULL || !bo_gem->included_in_check_aperture)
return;
 
bo_gem->included_in_check_aperture = false;
 
for (i = 0; i < bo_gem->reloc_count; i++)
drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
reloc_target_info[i].bo);
}
 
/**
* Return a conservative estimate for the amount of aperture required
* for a collection of buffers. This may double-count some buffers.
*/
static unsigned int
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
{
int i;
unsigned int total = 0;
 
for (i = 0; i < count; i++) {
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
if (bo_gem != NULL)
total += bo_gem->reloc_tree_size;
}
return total;
}
 
/**
* Return the amount of aperture needed for a collection of buffers.
* This avoids double counting any buffers, at the cost of looking
* at every buffer in the set.
*/
static unsigned int
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
{
int i;
unsigned int total = 0;
 
for (i = 0; i < count; i++) {
total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
/* For the first buffer object in the array, we get an
* accurate count back for its reloc_tree size (since nothing
* had been flagged as being counted yet). We can save that
* value out as a more conservative reloc_tree_size that
* avoids double-counting target buffers. Since the first
* buffer happens to usually be the batch buffer in our
* callers, this can pull us back from doing the tree
* walk on every new batch emit.
*/
if (i == 0) {
drm_intel_bo_gem *bo_gem =
(drm_intel_bo_gem *) bo_array[i];
bo_gem->reloc_tree_size = total;
}
}
 
for (i = 0; i < count; i++)
drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
return total;
}
 
/**
* Return -1 if the batchbuffer should be flushed before attempting to
* emit rendering referencing the buffers pointed to by bo_array.
*
* This is required because if we try to emit a batchbuffer with relocations
* to a tree of buffers that won't simultaneously fit in the aperture,
* the rendering will return an error at a point where the software is not
* prepared to recover from it.
*
* However, we also want to emit the batchbuffer significantly before we reach
* the limit, as a series of batchbuffers each of which references buffers
* covering almost all of the aperture means that at each emit we end up
* waiting to evict a buffer from the last rendering, and we get synchronous
* performance. By emitting smaller batchbuffers, we eat some CPU overhead to
* get better parallelism.
*/
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
unsigned int total = 0;
unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
int total_fences;
 
/* Check for fence reg constraints if necessary */
if (bufmgr_gem->available_fences) {
total_fences = drm_intel_gem_total_fences(bo_array, count);
if (total_fences > bufmgr_gem->available_fences)
return -ENOSPC;
}
 
total = drm_intel_gem_estimate_batch_space(bo_array, count);
 
if (total > threshold)
total = drm_intel_gem_compute_batch_space(bo_array, count);
 
if (total > threshold) {
DBG("check_space: overflowed available aperture, "
"%dkb vs %dkb\n",
total / 1024, (int)bufmgr_gem->gtt_size / 1024);
return -ENOSPC;
} else {
DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
(int)bufmgr_gem->gtt_size / 1024);
return 0;
}
}
 
/*
* Disable buffer reuse for objects which are shared with the kernel
* as scanout buffers
*/
static int
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
bo_gem->reusable = false;
return 0;
}
 
static int
drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
return bo_gem->reusable;
}
 
static int
_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int i;
 
for (i = 0; i < bo_gem->reloc_count; i++) {
if (bo_gem->reloc_target_info[i].bo == target_bo)
return 1;
if (bo == bo_gem->reloc_target_info[i].bo)
continue;
if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
target_bo))
return 1;
}
 
return 0;
}
 
/** Return true if target_bo is referenced by bo's relocation tree. */
static int
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
{
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
 
if (bo == NULL || target_bo == NULL)
return 0;
if (target_bo_gem->used_as_reloc_target)
return _drm_intel_gem_bo_references(bo, target_bo);
return 0;
}
 
static void
add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
{
unsigned int i = bufmgr_gem->num_buckets;
 
assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
 
DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
bufmgr_gem->cache_bucket[i].size = size;
bufmgr_gem->num_buckets++;
}
 
static void
init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
{
unsigned long size, cache_max_size = 64 * 1024 * 1024;
 
/* OK, so power of two buckets was too wasteful of memory.
* Give 3 other sizes between each power of two, to hopefully
* cover things accurately enough. (The alternative is
* probably to just go for exact matching of sizes, and assume
* that for things like composited window resize the tiled
* width/height alignment and rounding of sizes to pages will
* get us useful cache hit rates anyway)
*/
add_bucket(bufmgr_gem, 4096);
add_bucket(bufmgr_gem, 4096 * 2);
add_bucket(bufmgr_gem, 4096 * 3);
 
/* Initialize the linked lists for BO reuse cache. */
for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
add_bucket(bufmgr_gem, size);
 
add_bucket(bufmgr_gem, size + size * 1 / 4);
add_bucket(bufmgr_gem, size + size * 2 / 4);
add_bucket(bufmgr_gem, size + size * 3 / 4);
}
}
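
/* The resulting bucket sizes, for reference: 4kb, 8kb, 12kb, then for each
 * power of two p from 16kb through 64mb the four sizes p, 1.25p, 1.5p and
 * 1.75p -- i.e. 16kb, 20kb, 24kb, 28kb, 32kb, 40kb, 48kb, 56kb, 64kb, ...
 */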
 
void
drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
 
bufmgr_gem->vma_max = limit;
 
drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
}
 
/**
 * Get the PCI ID for the device. Upstream libdrm lets this be overridden
 * by setting the INTEL_DEVID_OVERRIDE environment variable; this port
 * always queries the kernel.
*/
static int
get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
{
	int devid;
int ret;
drm_i915_getparam_t gp;
 
VG_CLEAR(devid);
VG_CLEAR(gp);
gp.param = I915_PARAM_CHIPSET_ID;
gp.value = &devid;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret) {
fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
}
return devid;
}
 
int
drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
 
return bufmgr_gem->pci_device;
}
 
/**
* Sets up AUB dumping.
*
* This is a trace file format that can be used with the simulator.
* Packets are emitted in a format somewhat like GPU command packets.
* You can set up a GTT and upload your objects into the referenced
* space, then send off batchbuffers and get BMPs out the other end.
*/
void
drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
int entry = 0x200003;
int i;
int gtt_size = 0x10000;
const char *filename;
 
if (!enable) {
if (bufmgr_gem->aub_file) {
fclose(bufmgr_gem->aub_file);
bufmgr_gem->aub_file = NULL;
}
return;
}
 
bufmgr_gem->aub_file = fopen("intel.aub", "w+");
if (!bufmgr_gem->aub_file)
return;
 
/* Start allocating objects from just after the GTT. */
bufmgr_gem->aub_offset = gtt_size;
 
/* Start with a (required) version packet. */
aub_out(bufmgr_gem, CMD_AUB_HEADER | (13 - 2));
aub_out(bufmgr_gem,
(4 << AUB_HEADER_MAJOR_SHIFT) |
(0 << AUB_HEADER_MINOR_SHIFT));
for (i = 0; i < 8; i++) {
aub_out(bufmgr_gem, 0); /* app name */
}
aub_out(bufmgr_gem, 0); /* timestamp */
aub_out(bufmgr_gem, 0); /* timestamp */
aub_out(bufmgr_gem, 0); /* comment len */
 
/* Set up the GTT. The max we can handle is 256M */
aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2));
aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE);
aub_out(bufmgr_gem, 0); /* subtype */
aub_out(bufmgr_gem, 0); /* offset */
aub_out(bufmgr_gem, gtt_size); /* size */
for (i = 0x000; i < gtt_size; i += 4, entry += 0x1000) {
aub_out(bufmgr_gem, entry);
}
}
 
drm_intel_context *
drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
struct drm_i915_gem_context_create create;
drm_intel_context *context = NULL;
int ret;
 
VG_CLEAR(create);
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
if (ret != 0) {
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
strerror(errno));
return NULL;
}
 
	context = calloc(1, sizeof(*context));
	if (!context)
		return NULL;
	context->ctx_id = create.ctx_id;
	context->bufmgr = bufmgr;
 
return context;
}
 
void
drm_intel_gem_context_destroy(drm_intel_context *ctx)
{
drm_intel_bufmgr_gem *bufmgr_gem;
struct drm_i915_gem_context_destroy destroy;
int ret;
 
if (ctx == NULL)
return;
 
VG_CLEAR(destroy);
 
bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
destroy.ctx_id = ctx->ctx_id;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
&destroy);
if (ret != 0)
fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
strerror(errno));
 
free(ctx);
}
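 
/*
 * Illustrative sketch, not in the original source: the create/destroy
 * pairing for hardware contexts (Gen6+). drm_intel_gem_bo_context_exec()
 * is the upstream submission entry point and is assumed here; this port
 * may expose a different path.
 */
#if 0
static int run_with_context(drm_intel_bufmgr *bufmgr, drm_intel_bo *batch, int used)
{
drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
int ret;
 
if (ctx == NULL)
return -1; /* kernel lacks context support, or out of memory */
 
ret = drm_intel_gem_bo_context_exec(batch, ctx, used, I915_EXEC_RENDER);
 
drm_intel_gem_context_destroy(ctx);
return ret;
}
#endif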
 
int
drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
uint32_t offset,
uint64_t *result)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
struct drm_i915_reg_read reg_read;
int ret;
 
VG_CLEAR(reg_read);
reg_read.offset = offset;
 
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, &reg_read);
 
*result = reg_read.val;
return ret;
}
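 
/*
 * Illustrative sketch, not in the original source: reading the render
 * ring TIMESTAMP register. The 0x2358 offset follows the upstream i915
 * convention and is an assumption here, not taken from this file.
 */
#if 0
static uint64_t read_gpu_timestamp(drm_intel_bufmgr *bufmgr)
{
uint64_t ts = 0;
 
if (drm_intel_reg_read(bufmgr, 0x2358, &ts) != 0)
return 0; /* register read not permitted or not supported */
return ts;
}
#endif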
 
 
/**
* Annotate the given bo for use in aub dumping.
*
* \param annotations is an array of drm_intel_aub_annotation objects
* describing the type of data in various sections of the bo. Each
* element of the array specifies the type and subtype of a section of
* the bo, and the past-the-end offset of that section. The elements
* of \c annotations must be sorted so that ending_offset is
* increasing.
*
* \param count is the number of elements in the \c annotations array.
* If \c count is zero, then \c annotations will not be dereferenced.
*
* Annotations are copied into a private data structure, so caller may
* re-use the memory pointed to by \c annotations after the call
* returns.
*
* Annotations are stored for the lifetime of the bo; to reset to the
* default state (no annotations), call this function with a \c count
* of zero.
*/
void
drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
drm_intel_aub_annotation *annotations,
unsigned count)
{
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
unsigned size = sizeof(*annotations) * count;
drm_intel_aub_annotation *new_annotations =
count > 0 ? realloc(bo_gem->aub_annotations, size) : NULL;
if (new_annotations == NULL) {
free(bo_gem->aub_annotations);
bo_gem->aub_annotations = NULL;
bo_gem->aub_annotation_count = 0;
return;
}
memcpy(new_annotations, annotations, size);
bo_gem->aub_annotations = new_annotations;
bo_gem->aub_annotation_count = count;
}
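 
/*
 * Illustrative sketch, not in the original source: annotating a batch bo
 * so an AUB decoder can separate commands from trailing data. The
 * ending_offset values must be increasing, as documented above; the
 * AUB_TRACE_TYPE_* constants come from upstream intel_aub.h and are
 * assumed available here.
 */
#if 0
static void annotate_batch(drm_intel_bo *batch, uint32_t cmd_bytes, uint32_t total_bytes)
{
drm_intel_aub_annotation notes[2] = {
{ AUB_TRACE_TYPE_BATCH, 0, cmd_bytes }, /* command packets */
{ AUB_TRACE_TYPE_NOTYPE, 0, total_bytes }, /* trailing payload */
};
 
drm_intel_bufmgr_gem_set_aub_annotations(batch, notes, 2);
}
#endif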
 
/**
* Initializes the GEM buffer manager, which uses the kernel to allocate, map,
* and manage buffer objects.
*
* \param fd File descriptor of the opened DRM device.
*/
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
drm_intel_bufmgr_gem *bufmgr_gem;
struct drm_i915_gem_get_aperture aperture;
drm_i915_getparam_t gp;
int ret, tmp;
bool exec2 = false;
 
bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
if (bufmgr_gem == NULL)
return NULL;
 
bufmgr_gem->fd = fd;
 
// if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
// free(bufmgr_gem);
// return NULL;
// }
 
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_GET_APERTURE,
&aperture);
 
if (ret == 0)
bufmgr_gem->gtt_size = aperture.aper_available_size;
else {
printf("DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
strerror(errno));
bufmgr_gem->gtt_size = 128 * 1024 * 1024;
printf("Assuming %dkB available aperture size.\n"
"May lead to reduced performance or incorrect "
"rendering.\n",
(int)bufmgr_gem->gtt_size / 1024);
}
 
bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
 
if (IS_GEN2(bufmgr_gem->pci_device))
bufmgr_gem->gen = 2;
else if (IS_GEN3(bufmgr_gem->pci_device))
bufmgr_gem->gen = 3;
else if (IS_GEN4(bufmgr_gem->pci_device))
bufmgr_gem->gen = 4;
else if (IS_GEN5(bufmgr_gem->pci_device))
bufmgr_gem->gen = 5;
else if (IS_GEN6(bufmgr_gem->pci_device))
bufmgr_gem->gen = 6;
else if (IS_GEN7(bufmgr_gem->pci_device))
bufmgr_gem->gen = 7;
else {
free(bufmgr_gem);
return NULL;
}
 
// printf("gen %d\n", bufmgr_gem->gen);
 
if (IS_GEN3(bufmgr_gem->pci_device) &&
bufmgr_gem->gtt_size > 256*1024*1024) {
/* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
* be used for tiled blits. To simplify the accounting, just
* subtract the unmappable part (fixed to 256MB on all known
* gen3 devices) if the kernel advertises it. */
bufmgr_gem->gtt_size -= 256*1024*1024;
}
 
VG_CLEAR(gp);
gp.value = &tmp;
 
gp.param = I915_PARAM_HAS_EXECBUF2;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (!ret)
exec2 = true;
 
gp.param = I915_PARAM_HAS_BSD;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_bsd = ret == 0;
 
gp.param = I915_PARAM_HAS_BLT;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_blt = ret == 0;
 
gp.param = I915_PARAM_HAS_RELAXED_FENCING;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_relaxed_fencing = ret == 0;
 
gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_wait_timeout = ret == 0;
 
gp.param = I915_PARAM_HAS_LLC;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret != 0) {
/* Kernel does not support the HAS_LLC query; fall back to GPU
* generation detection and assume LLC is present on GEN6/7.
*/
bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
IS_GEN7(bufmgr_gem->pci_device));
} else
bufmgr_gem->has_llc = *gp.value;
 
gp.param = I915_PARAM_HAS_VEBOX;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
 
if (bufmgr_gem->gen < 4) {
gp.param = I915_PARAM_NUM_FENCES_AVAIL;
gp.value = &bufmgr_gem->available_fences;
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
if (ret) {
fprintf(stderr, "get fences failed: %d [%d]\n", ret,
errno);
fprintf(stderr, "param: %d, val: %d\n", gp.param,
*gp.value);
bufmgr_gem->available_fences = 0;
} else {
/* XXX The kernel reports the total number of fences,
* including any that may be pinned.
*
* We presume that there will be at least one pinned
* fence for the scanout buffer, but there may be more
* than one scanout and the user may be manually
* pinning buffers. Let's move to execbuffer2 and
* thereby forget the insanity of using fences...
*/
bufmgr_gem->available_fences -= 2;
if (bufmgr_gem->available_fences < 0)
bufmgr_gem->available_fences = 0;
}
}
 
/* Let's go with one relocation for every 2 dwords (but round down a bit
* since a power of two will mean an extra page allocation for the reloc
* buffer).
*
* Every 4 was too few for the blender benchmark.
*/
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
 
bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
// bufmgr_gem->bufmgr.bo_alloc_for_render =
// drm_intel_gem_bo_alloc_for_render;
bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
// bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
/* Use the new one if available */
// if (exec2) {
bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
// } else
// bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
bufmgr_gem->bufmgr.debug = 0;
bufmgr_gem->bufmgr.check_aperture_space =
drm_intel_gem_check_aperture_space;
bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
// bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
// drm_intel_gem_get_pipe_from_crtc_id;
bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
 
DRMINITLISTHEAD(&bufmgr_gem->named);
init_cache_buckets(bufmgr_gem);
 
DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
bufmgr_gem->vma_max = -1; /* unlimited by default */
 
return &bufmgr_gem->bufmgr;
}
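 
/*
 * Illustrative sketch, not in the original source: typical bring-up of
 * the GEM bufmgr. The 16 KiB batch size is a common client choice, and
 * drm_intel_bufmgr_gem_enable_reuse() is the upstream call that routes
 * freed bos back into the cache buckets initialized above (assumed to be
 * present in this port).
 */
#if 0
static drm_intel_bufmgr *open_bufmgr(int fd)
{
drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 
if (bufmgr == NULL)
return NULL; /* unsupported generation or ioctl failure */
 
drm_intel_bufmgr_gem_enable_reuse(bufmgr);
return bufmgr;
}
#endif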
 
 
drm_intel_bo *
bo_create_from_gem_handle(drm_intel_bufmgr *bufmgr,
unsigned int size, unsigned int handle)
{
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
drm_intel_bo_gem *bo_gem;
int ret;
struct drm_i915_gem_get_tiling get_tiling;
drmMMListHead *list;
 
/* At the moment most applications only have a few named bos.
* For instance, in a DRI client only the render buffers passed
* between X and the client are named. And since X returns the
* alternating names for the front/back buffer, a linear search
* provides a sufficiently fast match.
*/
for (list = bufmgr_gem->named.next;
list != &bufmgr_gem->named;
list = list->next) {
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list);
if (bo_gem->gem_handle == handle) {
return &bo_gem->bo;
}
}
 
bo_gem = calloc(1, sizeof(*bo_gem));
if (!bo_gem)
return NULL;
 
bo_gem->bo.size = size;
bo_gem->bo.offset = 0;
bo_gem->bo.virtual = NULL;
bo_gem->bo.bufmgr = bufmgr;
bo_gem->name = NULL;
atomic_set(&bo_gem->refcount, 1);
bo_gem->validate_index = -1;
bo_gem->gem_handle = handle;
bo_gem->bo.handle = handle;
bo_gem->global_name = 0;
bo_gem->reusable = false;
 
VG_CLEAR(get_tiling);
get_tiling.handle = bo_gem->gem_handle;
ret = drmIoctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_GET_TILING,
&get_tiling);
if (ret != 0) {
drm_intel_gem_bo_unreference(&bo_gem->bo);
return NULL;
}
bo_gem->tiling_mode = get_tiling.tiling_mode;
bo_gem->swizzle_mode = get_tiling.swizzle_mode;
/* XXX stride is unknown */
drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
 
DRMINITLISTHEAD(&bo_gem->vma_list);
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named);
printf("bo_create_from_handle: %d\n", handle);
 
return &bo_gem->bo;
}
/contrib/sdk/sources/libdrm/intel/intel_bufmgr_priv.h
0,0 → 1,292
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
*
*/
 
/**
* @file intel_bufmgr_priv.h
*
* Private definitions of Intel-specific bufmgr functions and structures.
*/
 
#ifndef INTEL_BUFMGR_PRIV_H
#define INTEL_BUFMGR_PRIV_H
 
/**
* Context for a buffer manager instance.
*
* Contains public methods followed by private storage for the buffer manager.
*/
struct _drm_intel_bufmgr {
/**
* Allocate a buffer object.
*
* Buffer objects are not necessarily initially mapped into CPU virtual
* address space or graphics device aperture. They must be mapped
* using bo_map() or drm_intel_gem_bo_map_gtt() to be used by the CPU.
*/
drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
unsigned long size, unsigned int alignment);
 
/**
* Allocate a buffer object, hinting that it will be used as a
* render target.
*
* This is otherwise the same as bo_alloc.
*/
// drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
// const char *name,
// unsigned long size,
// unsigned int alignment);
 
/**
* Allocate a tiled buffer object.
*
* Alignment for tiled objects is set automatically; the 'flags'
* argument provides a hint about how the object will be used initially.
*
* Valid tiling formats are:
* I915_TILING_NONE
* I915_TILING_X
* I915_TILING_Y
*
* Note the tiling format may be rejected; callers should check the
* 'tiling_mode' field on return, as well as the pitch value, which
* may have been rounded up to accommodate tiling restrictions.
*/
drm_intel_bo *(*bo_alloc_tiled) (drm_intel_bufmgr *bufmgr,
const char *name,
int x, int y, int cpp,
uint32_t *tiling_mode,
unsigned long *pitch,
unsigned long flags);
 
/** Takes a reference on a buffer object */
void (*bo_reference) (drm_intel_bo *bo);
 
/**
* Releases a reference on a buffer object, freeing the data if
* no references remain.
*/
void (*bo_unreference) (drm_intel_bo *bo);
 
/**
* Maps the buffer into userspace.
*
* This function will block waiting for any existing execution on the
* buffer to complete, first. The resulting mapping is available at
* buf->virtual.
*/
int (*bo_map) (drm_intel_bo *bo, int write_enable);
 
/**
* Reduces the refcount on the userspace mapping of the buffer
* object.
*/
int (*bo_unmap) (drm_intel_bo *bo);
 
/**
* Write data into an object.
*
* This is an optional function; if missing,
* drm_intel_bo falls back to map/memcpy/unmap.
*/
int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset,
unsigned long size, const void *data);
 
/**
* Read data from an object
*
* This is an optional function; if missing,
* drm_intel_bo falls back to map/memcpy/unmap.
*/
// int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
// unsigned long size, void *data);
 
/**
* Waits for rendering to an object by the GPU to have completed.
*
* This is not required for any access to the BO by bo_map,
* bo_subdata, etc. It is merely a way for the driver to implement
* glFinish.
*/
void (*bo_wait_rendering) (drm_intel_bo *bo);
 
/**
* Tears down the buffer manager instance.
*/
void (*destroy) (drm_intel_bufmgr *bufmgr);
 
/**
* Add relocation entry in reloc_buf, which will be updated with the
* target buffer's real offset on command submission.
*
* Relocations remain in place for the lifetime of the buffer object.
*
* \param bo Buffer to write the relocation into.
* \param offset Byte offset within reloc_bo of the pointer to
* target_bo.
* \param target_bo Buffer whose offset should be written into the
* relocation entry.
* \param target_offset Constant value to be added to target_bo's
* offset in relocation entry.
* \param read_domains GEM read domains which the buffer will be
* read into by the command that this relocation
* is part of.
* \param write_domain GEM write domain which the buffer will be
* dirtied in by the command that this
* relocation is part of.
*/
int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo, uint32_t target_offset,
uint32_t read_domains, uint32_t write_domain);
int (*bo_emit_reloc_fence)(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo *target_bo,
uint32_t target_offset,
uint32_t read_domains,
uint32_t write_domain);
 
/** Executes the command buffer pointed to by bo. */
int (*bo_exec) (drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4);
 
/** Executes the command buffer pointed to by bo on the selected
* ring buffer
*/
int (*bo_mrb_exec) (drm_intel_bo *bo, int used,
drm_clip_rect_t *cliprects, int num_cliprects,
int DR4, unsigned flags);
 
/**
* Pin a buffer to the aperture and fix the offset until unpinned
*
* \param buf Buffer to pin
* \param alignment Required alignment for aperture, in bytes
*/
int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);
 
/**
* Unpin a buffer from the aperture, allowing it to be removed
*
* \param buf Buffer to unpin
*/
int (*bo_unpin) (drm_intel_bo *bo);
 
/**
* Ask that the buffer be placed in tiling mode
*
* \param buf Buffer to set tiling mode for
* \param tiling_mode desired, and returned tiling mode
*/
int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t stride);
 
/**
* Get the current tiling (and resulting swizzling) mode for the bo.
*
* \param buf Buffer to get tiling mode for
* \param tiling_mode returned tiling mode
* \param swizzle_mode returned swizzling mode
*/
int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t * tiling_mode,
uint32_t * swizzle_mode);
 
/**
* Create a visible name for a buffer which can be used by other apps
*
* \param buf Buffer to create a name for
* \param name Returned name
*/
int (*bo_flink) (drm_intel_bo *bo, uint32_t * name);
 
/**
* Returns 1 if mapping the buffer for write could cause the process
* to block, due to the object being active in the GPU.
*/
int (*bo_busy) (drm_intel_bo *bo);
 
/**
* Specify the volatility of the buffer.
* \param bo Buffer to set the purgeable status for
* \param madv The purgeable status
*
* Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
* reclaimed under memory pressure. If you subsequently require the buffer,
* then you must pass I915_MADV_WILLNEED to mark the buffer as required.
*
* Returns 1 if the buffer was retained, or 0 if it was discarded whilst
* marked as I915_MADV_DONTNEED.
*/
int (*bo_madvise) (drm_intel_bo *bo, int madv);
 
int (*check_aperture_space) (drm_intel_bo ** bo_array, int count);
 
/**
* Disable buffer reuse for buffers which will be shared in some way,
* as with scanout buffers. When the buffer reference count goes to
* zero, it will be freed and not placed in the reuse list.
*
* \param bo Buffer to disable reuse for
*/
int (*bo_disable_reuse) (drm_intel_bo *bo);
 
/**
* Query whether a buffer is reusable.
*
* \param bo Buffer to query
*/
int (*bo_is_reusable) (drm_intel_bo *bo);
 
/**
*
* Return the pipe associated with a crtc_id so that vblank
* synchronization can use the correct data in the request.
* This is only supported for KMS and GEM at this point; when
* unsupported, this function returns -1 and leaves the decision
* of what to do in that case to the caller.
*
* \param bufmgr the associated buffer manager
* \param crtc_id the crtc identifier
*/
// int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id);
 
/** Returns true if target_bo is in the relocation tree rooted at bo. */
int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);
 
/**< Enables verbose debugging printouts */
int debug;
};
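 
/*
 * Illustrative sketch, not in the original source: the public drm_intel_*
 * entry points are thin wrappers that dispatch through this vtable, so a
 * call such as upstream's drm_intel_bo_map() reduces to:
 */
#if 0
static int example_bo_map(drm_intel_bo *bo, int write_enable)
{
return bo->bufmgr->bo_map(bo, write_enable);
}
#endif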
 
struct _drm_intel_context {
unsigned int ctx_id;
struct _drm_intel_bufmgr *bufmgr;
};
 
#define ALIGN(value, alignment) (((value) + (alignment) - 1) & ~((alignment) - 1))
#define ROUND_UP_TO(x, y) (((x) + (y) - 1) / (y) * (y))
#define ROUND_UP_TO_MB(x) ROUND_UP_TO((x), 1024*1024)
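 
/*
 * Worked example, not in the original source: with a 4 KiB alignment,
 * ALIGN(5000, 4096) == (5000 + 4095) & ~4095 == 8192, and
 * ROUND_UP_TO_MB(5000) == (5000 + 1048575) / 1048576 * 1048576 == 1048576.
 * ALIGN assumes a power-of-two alignment; ROUND_UP_TO works for any
 * positive multiple.
 */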
 
#endif /* INTEL_BUFMGR_PRIV_H */
/contrib/sdk/sources/libdrm/intel/intel_chipset.h
0,0 → 1,325
/*
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef _INTEL_CHIPSET_H
#define _INTEL_CHIPSET_H
 
#define PCI_CHIP_I810 0x7121
#define PCI_CHIP_I810_DC100 0x7123
#define PCI_CHIP_I810_E 0x7125
#define PCI_CHIP_I815 0x1132
 
#define PCI_CHIP_I830_M 0x3577
#define PCI_CHIP_845_G 0x2562
#define PCI_CHIP_I855_GM 0x3582
#define PCI_CHIP_I865_G 0x2572
 
#define PCI_CHIP_I915_G 0x2582
#define PCI_CHIP_E7221_G 0x258A
#define PCI_CHIP_I915_GM 0x2592
#define PCI_CHIP_I945_G 0x2772
#define PCI_CHIP_I945_GM 0x27A2
#define PCI_CHIP_I945_GME 0x27AE
 
#define PCI_CHIP_Q35_G 0x29B2
#define PCI_CHIP_G33_G 0x29C2
#define PCI_CHIP_Q33_G 0x29D2
 
#define PCI_CHIP_IGD_GM 0xA011
#define PCI_CHIP_IGD_G 0xA001
 
#define IS_IGDGM(devid) ((devid) == PCI_CHIP_IGD_GM)
#define IS_IGDG(devid) ((devid) == PCI_CHIP_IGD_G)
#define IS_IGD(devid) (IS_IGDG(devid) || IS_IGDGM(devid))
 
#define PCI_CHIP_I965_G 0x29A2
#define PCI_CHIP_I965_Q 0x2992
#define PCI_CHIP_I965_G_1 0x2982
#define PCI_CHIP_I946_GZ 0x2972
#define PCI_CHIP_I965_GM 0x2A02
#define PCI_CHIP_I965_GME 0x2A12
 
#define PCI_CHIP_GM45_GM 0x2A42
 
#define PCI_CHIP_IGD_E_G 0x2E02
#define PCI_CHIP_Q45_G 0x2E12
#define PCI_CHIP_G45_G 0x2E22
#define PCI_CHIP_G41_G 0x2E32
 
#define PCI_CHIP_ILD_G 0x0042
#define PCI_CHIP_ILM_G 0x0046
 
#define PCI_CHIP_SANDYBRIDGE_GT1 0x0102 /* desktop */
#define PCI_CHIP_SANDYBRIDGE_GT2 0x0112
#define PCI_CHIP_SANDYBRIDGE_GT2_PLUS 0x0122
#define PCI_CHIP_SANDYBRIDGE_M_GT1 0x0106 /* mobile */
#define PCI_CHIP_SANDYBRIDGE_M_GT2 0x0116
#define PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS 0x0126
#define PCI_CHIP_SANDYBRIDGE_S 0x010A /* server */
 
#define PCI_CHIP_IVYBRIDGE_GT1 0x0152 /* desktop */
#define PCI_CHIP_IVYBRIDGE_GT2 0x0162
#define PCI_CHIP_IVYBRIDGE_M_GT1 0x0156 /* mobile */
#define PCI_CHIP_IVYBRIDGE_M_GT2 0x0166
#define PCI_CHIP_IVYBRIDGE_S 0x015a /* server */
#define PCI_CHIP_IVYBRIDGE_S_GT2 0x016a /* server */
 
#define PCI_CHIP_HASWELL_GT1 0x0402 /* Desktop */
#define PCI_CHIP_HASWELL_GT2 0x0412
#define PCI_CHIP_HASWELL_GT3 0x0422
#define PCI_CHIP_HASWELL_M_GT1 0x0406 /* Mobile */
#define PCI_CHIP_HASWELL_M_GT2 0x0416
#define PCI_CHIP_HASWELL_M_GT3 0x0426
#define PCI_CHIP_HASWELL_S_GT1 0x040A /* Server */
#define PCI_CHIP_HASWELL_S_GT2 0x041A
#define PCI_CHIP_HASWELL_S_GT3 0x042A
#define PCI_CHIP_HASWELL_B_GT1 0x040B /* Reserved */
#define PCI_CHIP_HASWELL_B_GT2 0x041B
#define PCI_CHIP_HASWELL_B_GT3 0x042B
#define PCI_CHIP_HASWELL_E_GT1 0x040E /* Reserved */
#define PCI_CHIP_HASWELL_E_GT2 0x041E
#define PCI_CHIP_HASWELL_E_GT3 0x042E
#define PCI_CHIP_HASWELL_SDV_GT1 0x0C02 /* Desktop */
#define PCI_CHIP_HASWELL_SDV_GT2 0x0C12
#define PCI_CHIP_HASWELL_SDV_GT3 0x0C22
#define PCI_CHIP_HASWELL_SDV_M_GT1 0x0C06 /* Mobile */
#define PCI_CHIP_HASWELL_SDV_M_GT2 0x0C16
#define PCI_CHIP_HASWELL_SDV_M_GT3 0x0C26
#define PCI_CHIP_HASWELL_SDV_S_GT1 0x0C0A /* Server */
#define PCI_CHIP_HASWELL_SDV_S_GT2 0x0C1A
#define PCI_CHIP_HASWELL_SDV_S_GT3 0x0C2A
#define PCI_CHIP_HASWELL_SDV_B_GT1 0x0C0B /* Reserved */
#define PCI_CHIP_HASWELL_SDV_B_GT2 0x0C1B
#define PCI_CHIP_HASWELL_SDV_B_GT3 0x0C2B
#define PCI_CHIP_HASWELL_SDV_E_GT1 0x0C0E /* Reserved */
#define PCI_CHIP_HASWELL_SDV_E_GT2 0x0C1E
#define PCI_CHIP_HASWELL_SDV_E_GT3 0x0C2E
#define PCI_CHIP_HASWELL_ULT_GT1 0x0A02 /* Desktop */
#define PCI_CHIP_HASWELL_ULT_GT2 0x0A12
#define PCI_CHIP_HASWELL_ULT_GT3 0x0A22
#define PCI_CHIP_HASWELL_ULT_M_GT1 0x0A06 /* Mobile */
#define PCI_CHIP_HASWELL_ULT_M_GT2 0x0A16
#define PCI_CHIP_HASWELL_ULT_M_GT3 0x0A26
#define PCI_CHIP_HASWELL_ULT_S_GT1 0x0A0A /* Server */
#define PCI_CHIP_HASWELL_ULT_S_GT2 0x0A1A
#define PCI_CHIP_HASWELL_ULT_S_GT3 0x0A2A
#define PCI_CHIP_HASWELL_ULT_B_GT1 0x0A0B /* Reserved */
#define PCI_CHIP_HASWELL_ULT_B_GT2 0x0A1B
#define PCI_CHIP_HASWELL_ULT_B_GT3 0x0A2B
#define PCI_CHIP_HASWELL_ULT_E_GT1 0x0A0E /* Reserved */
#define PCI_CHIP_HASWELL_ULT_E_GT2 0x0A1E
#define PCI_CHIP_HASWELL_ULT_E_GT3 0x0A2E
#define PCI_CHIP_HASWELL_CRW_GT1 0x0D02 /* Desktop */
#define PCI_CHIP_HASWELL_CRW_GT2 0x0D12
#define PCI_CHIP_HASWELL_CRW_GT3 0x0D22
#define PCI_CHIP_HASWELL_CRW_M_GT1 0x0D06 /* Mobile */
#define PCI_CHIP_HASWELL_CRW_M_GT2 0x0D16
#define PCI_CHIP_HASWELL_CRW_M_GT3 0x0D26
#define PCI_CHIP_HASWELL_CRW_S_GT1 0x0D0A /* Server */
#define PCI_CHIP_HASWELL_CRW_S_GT2 0x0D1A
#define PCI_CHIP_HASWELL_CRW_S_GT3 0x0D2A
#define PCI_CHIP_HASWELL_CRW_B_GT1 0x0D0B /* Reserved */
#define PCI_CHIP_HASWELL_CRW_B_GT2 0x0D1B
#define PCI_CHIP_HASWELL_CRW_B_GT3 0x0D2B
#define PCI_CHIP_HASWELL_CRW_E_GT1 0x0D0E /* Reserved */
#define PCI_CHIP_HASWELL_CRW_E_GT2 0x0D1E
#define PCI_CHIP_HASWELL_CRW_E_GT3 0x0D2E
 
#define BDW_SPARE 0x2
#define BDW_ULT 0x6
#define BDW_SERVER 0xa
#define BDW_IRIS 0xb
#define BDW_WORKSTATION 0xd
#define BDW_ULX 0xe
 
#define PCI_CHIP_VALLEYVIEW_PO 0x0f30 /* VLV PO board */
#define PCI_CHIP_VALLEYVIEW_1 0x0f31
#define PCI_CHIP_VALLEYVIEW_2 0x0f32
#define PCI_CHIP_VALLEYVIEW_3 0x0f33
 
#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \
(devid) == PCI_CHIP_I915_GM || \
(devid) == PCI_CHIP_I945_GM || \
(devid) == PCI_CHIP_I945_GME || \
(devid) == PCI_CHIP_I965_GM || \
(devid) == PCI_CHIP_I965_GME || \
(devid) == PCI_CHIP_GM45_GM || IS_IGD(devid) || \
(devid) == PCI_CHIP_IVYBRIDGE_M_GT1 || \
(devid) == PCI_CHIP_IVYBRIDGE_M_GT2)
 
#define IS_G45(devid) ((devid) == PCI_CHIP_IGD_E_G || \
(devid) == PCI_CHIP_Q45_G || \
(devid) == PCI_CHIP_G45_G || \
(devid) == PCI_CHIP_G41_G)
#define IS_GM45(devid) ((devid) == PCI_CHIP_GM45_GM)
#define IS_G4X(devid) (IS_G45(devid) || IS_GM45(devid))
 
#define IS_ILD(devid) ((devid) == PCI_CHIP_ILD_G)
#define IS_ILM(devid) ((devid) == PCI_CHIP_ILM_G)
 
#define IS_915(devid) ((devid) == PCI_CHIP_I915_G || \
(devid) == PCI_CHIP_E7221_G || \
(devid) == PCI_CHIP_I915_GM)
 
#define IS_945GM(devid) ((devid) == PCI_CHIP_I945_GM || \
(devid) == PCI_CHIP_I945_GME)
 
#define IS_945(devid) ((devid) == PCI_CHIP_I945_G || \
(devid) == PCI_CHIP_I945_GM || \
(devid) == PCI_CHIP_I945_GME || \
IS_G33(devid))
 
#define IS_G33(devid) ((devid) == PCI_CHIP_G33_G || \
(devid) == PCI_CHIP_Q33_G || \
(devid) == PCI_CHIP_Q35_G || IS_IGD(devid))
 
#define IS_GEN2(devid) ((devid) == PCI_CHIP_I830_M || \
(devid) == PCI_CHIP_845_G || \
(devid) == PCI_CHIP_I855_GM || \
(devid) == PCI_CHIP_I865_G)
 
#define IS_GEN3(devid) (IS_945(devid) || IS_915(devid))
 
#define IS_GEN4(devid) ((devid) == PCI_CHIP_I965_G || \
(devid) == PCI_CHIP_I965_Q || \
(devid) == PCI_CHIP_I965_G_1 || \
(devid) == PCI_CHIP_I965_GM || \
(devid) == PCI_CHIP_I965_GME || \
(devid) == PCI_CHIP_I946_GZ || \
IS_G4X(devid))
 
#define IS_GEN5(devid) (IS_ILD(devid) || IS_ILM(devid))
 
#define IS_GEN6(devid) ((devid) == PCI_CHIP_SANDYBRIDGE_GT1 || \
(devid) == PCI_CHIP_SANDYBRIDGE_GT2 || \
(devid) == PCI_CHIP_SANDYBRIDGE_GT2_PLUS || \
(devid) == PCI_CHIP_SANDYBRIDGE_M_GT1 || \
(devid) == PCI_CHIP_SANDYBRIDGE_M_GT2 || \
(devid) == PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS || \
(devid) == PCI_CHIP_SANDYBRIDGE_S)
 
#define IS_GEN7(devid) (IS_IVYBRIDGE(devid) || \
IS_HASWELL(devid) || \
IS_VALLEYVIEW(devid))
 
#define IS_IVYBRIDGE(devid) ((devid) == PCI_CHIP_IVYBRIDGE_GT1 || \
(devid) == PCI_CHIP_IVYBRIDGE_GT2 || \
(devid) == PCI_CHIP_IVYBRIDGE_M_GT1 || \
(devid) == PCI_CHIP_IVYBRIDGE_M_GT2 || \
(devid) == PCI_CHIP_IVYBRIDGE_S || \
(devid) == PCI_CHIP_IVYBRIDGE_S_GT2)
 
#define IS_VALLEYVIEW(devid) ((devid) == PCI_CHIP_VALLEYVIEW_PO || \
(devid) == PCI_CHIP_VALLEYVIEW_1 || \
(devid) == PCI_CHIP_VALLEYVIEW_2 || \
(devid) == PCI_CHIP_VALLEYVIEW_3)
 
#define IS_HSW_GT1(devid) ((devid) == PCI_CHIP_HASWELL_GT1 || \
(devid) == PCI_CHIP_HASWELL_M_GT1 || \
(devid) == PCI_CHIP_HASWELL_S_GT1 || \
(devid) == PCI_CHIP_HASWELL_B_GT1 || \
(devid) == PCI_CHIP_HASWELL_E_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_M_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_S_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_B_GT1 || \
(devid) == PCI_CHIP_HASWELL_SDV_E_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_M_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_S_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_B_GT1 || \
(devid) == PCI_CHIP_HASWELL_ULT_E_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_M_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_S_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_B_GT1 || \
(devid) == PCI_CHIP_HASWELL_CRW_E_GT1)
#define IS_HSW_GT2(devid) ((devid) == PCI_CHIP_HASWELL_GT2 || \
(devid) == PCI_CHIP_HASWELL_M_GT2 || \
(devid) == PCI_CHIP_HASWELL_S_GT2 || \
(devid) == PCI_CHIP_HASWELL_B_GT2 || \
(devid) == PCI_CHIP_HASWELL_E_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_M_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_S_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_B_GT2 || \
(devid) == PCI_CHIP_HASWELL_SDV_E_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_M_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_S_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_B_GT2 || \
(devid) == PCI_CHIP_HASWELL_ULT_E_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_M_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_S_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_B_GT2 || \
(devid) == PCI_CHIP_HASWELL_CRW_E_GT2)
#define IS_HSW_GT3(devid) ((devid) == PCI_CHIP_HASWELL_GT3 || \
(devid) == PCI_CHIP_HASWELL_M_GT3 || \
(devid) == PCI_CHIP_HASWELL_S_GT3 || \
(devid) == PCI_CHIP_HASWELL_B_GT3 || \
(devid) == PCI_CHIP_HASWELL_E_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_M_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_S_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_B_GT3 || \
(devid) == PCI_CHIP_HASWELL_SDV_E_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_M_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_S_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_B_GT3 || \
(devid) == PCI_CHIP_HASWELL_ULT_E_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_M_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_S_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_B_GT3 || \
(devid) == PCI_CHIP_HASWELL_CRW_E_GT3)
 
#define IS_HASWELL(devid) (IS_HSW_GT1(devid) || \
IS_HSW_GT2(devid) || \
IS_HSW_GT3(devid))
 
#define IS_BROADWELL(devid) ((((devid) & 0xff00) != 0x1600) ? 0 : \
((((devid) & 0x00f0) >> 4) > 3) ? 0 : \
(((devid) & 0x000f) == BDW_SPARE) ? 1 : \
(((devid) & 0x000f) == BDW_ULT) ? 1 : \
(((devid) & 0x000f) == BDW_IRIS) ? 1 : \
(((devid) & 0x000f) == BDW_SERVER) ? 1 : \
(((devid) & 0x000f) == BDW_WORKSTATION) ? 1 : \
(((devid) & 0x000f) == BDW_ULX) ? 1 : 0)
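 
/*
 * Worked example, not in the original source: devid 0x1616 (a Broadwell-U
 * GT2 part) decodes as (0x1616 & 0xff00) == 0x1600, GT field
 * ((0x1616 & 0x00f0) >> 4) == 1 <= 3, and variant nibble
 * (0x1616 & 0x000f) == 0x6 == BDW_ULT, so IS_BROADWELL(0x1616) is 1.
 */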
 
 
#define IS_GEN8(devid) IS_BROADWELL(devid)
 
#define IS_9XX(dev) (IS_GEN3(dev) || \
IS_GEN4(dev) || \
IS_GEN5(dev) || \
IS_GEN6(dev) || \
IS_GEN7(dev) || \
IS_GEN8(dev))
 
 
#endif /* _INTEL_CHIPSET_H */
/contrib/sdk/sources/libdrm/libdrm.ver
0,0 → 1,7
LIBDRM {
global: DllStartup;
*;
local: __chkstk;
__chkstk_ms;
_alloca;
};
/contrib/sdk/sources/libdrm/libdrm_lists.h
0,0 → 1,118
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND. USA.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
 
/*
* List macros heavily inspired by the Linux kernel
* list handling.
*/
 
#include <stddef.h>
 
typedef struct _drmMMListHead
{
struct _drmMMListHead *prev;
struct _drmMMListHead *next;
} drmMMListHead;
 
#define DRMINITLISTHEAD(__item) \
do{ \
(__item)->prev = (__item); \
(__item)->next = (__item); \
} while (0)
 
#define DRMLISTADD(__item, __list) \
do { \
(__item)->prev = (__list); \
(__item)->next = (__list)->next; \
(__list)->next->prev = (__item); \
(__list)->next = (__item); \
} while (0)
 
#define DRMLISTADDTAIL(__item, __list) \
do { \
(__item)->next = (__list); \
(__item)->prev = (__list)->prev; \
(__list)->prev->next = (__item); \
(__list)->prev = (__item); \
} while(0)
 
#define DRMLISTDEL(__item) \
do { \
(__item)->prev->next = (__item)->next; \
(__item)->next->prev = (__item)->prev; \
} while(0)
 
#define DRMLISTDELINIT(__item) \
do { \
(__item)->prev->next = (__item)->next; \
(__item)->next->prev = (__item)->prev; \
(__item)->next = (__item); \
(__item)->prev = (__item); \
} while(0)
 
#define DRMLISTENTRY(__type, __item, __field) \
((__type *)(((char *) (__item)) - offsetof(__type, __field)))
 
#define DRMLISTEMPTY(__item) ((__item)->next == (__item))
 
#define DRMLISTSINGLE(__list) \
(!DRMLISTEMPTY(__list) && ((__list)->next == (__list)->prev))
 
#define DRMLISTFOREACH(__item, __list) \
for ((__item) = (__list)->next; \
(__item) != (__list); (__item) = (__item)->next)
 
#define DRMLISTFOREACHSAFE(__item, __temp, __list) \
for ((__item) = (__list)->next, (__temp) = (__item)->next; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->next)
 
#define DRMLISTFOREACHSAFEREVERSE(__item, __temp, __list) \
for ((__item) = (__list)->prev, (__temp) = (__item)->prev; \
(__item) != (__list); \
(__item) = (__temp), (__temp) = (__item)->prev)
 
#define DRMLISTFOREACHENTRY(__item, __list, __head) \
for ((__item) = DRMLISTENTRY(typeof(*__item), (__list)->next, __head); \
&(__item)->__head != (__list); \
(__item) = DRMLISTENTRY(typeof(*__item), \
(__item)->__head.next, __head))
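 
/*
 * Illustrative sketch, not in the original source: the embedding pattern
 * these macros support, mirroring the named-bo list in intel_bufmgr_gem.c.
 */
#if 0
struct item {
int value;
drmMMListHead link; /* embedded list node */
};
 
static int list_demo(void)
{
drmMMListHead head;
struct item a = { 1 }, b = { 2 };
struct item *it;
int sum = 0;
 
DRMINITLISTHEAD(&head);
DRMLISTADDTAIL(&a.link, &head);
DRMLISTADDTAIL(&b.link, &head);
 
DRMLISTFOREACHENTRY(it, &head, link)
sum += it->value; /* visits a then b; sum == 3 */
 
return sum;
}
#endif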
 
#define DRMLISTFOREACHENTRYSAFE(__item, __temp, __list, __head) \
for ((__item) = DRMLISTENTRY(typeof(*__item), (__list)->next, __head), \
(__temp) = DRMLISTENTRY(typeof(*__item), \
(__item)->__head.next, __head); \
&(__item)->__head != (__list); \
(__item) = (__temp), \
(__temp) = DRMLISTENTRY(typeof(*__item), \
(__temp)->__head.next, __head))
 
#define DRMLISTJOIN(__list, __join) if (!DRMLISTEMPTY(__list)) { \
(__list)->next->prev = (__join); \
(__list)->prev->next = (__join)->next; \
(__join)->next->prev = (__list)->prev; \
(__join)->next = (__list)->next; \
}
/contrib/sdk/sources/libdrm/xf86atomic.h
0,0 → 1,97
/*
* Copyright © 2009 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Chris Wilson <chris@chris-wilson.co.uk>
*
*/
 
/**
* @file xf86atomic.h
*
* Private definitions for atomic operations
*/
 
#ifndef LIBDRM_ATOMICS_H
#define LIBDRM_ATOMICS_H
 
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
 
 
#define HAS_ATOMIC_OPS 1
 
typedef struct {
int atomic;
} atomic_t;
 
# define atomic_read(x) ((x)->atomic)
# define atomic_set(x, val) ((x)->atomic = (val))
# define atomic_inc(x) ((void) __sync_fetch_and_add (&(x)->atomic, 1))
# define atomic_dec_and_test(x) (__sync_fetch_and_add (&(x)->atomic, -1) == 1)
# define atomic_add(x, v) ((void) __sync_add_and_fetch(&(x)->atomic, (v)))
# define atomic_dec(x, v) ((void) __sync_sub_and_fetch(&(x)->atomic, (v)))
# define atomic_cmpxchg(x, oldv, newv) __sync_val_compare_and_swap (&(x)->atomic, oldv, newv)
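 
/*
 * Illustrative sketch, not in the original source: the reference-counting
 * idiom these macros serve, matching how intel_bufmgr_gem.c manages bo
 * refcounts. atomic_dec_and_test() is true when the last reference drops.
 */
#if 0
static void obj_ref(atomic_t *refcount)
{
atomic_inc(refcount);
}
 
static int obj_unref(atomic_t *refcount)
{
return atomic_dec_and_test(refcount); /* 1 == caller must free */
}
#endif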
 
 
#if HAVE_LIB_ATOMIC_OPS
#include <atomic_ops.h>
 
#define HAS_ATOMIC_OPS 1
 
typedef struct {
AO_t atomic;
} atomic_t;
 
# define atomic_read(x) AO_load_full(&(x)->atomic)
# define atomic_set(x, val) AO_store_full(&(x)->atomic, (val))
# define atomic_inc(x) ((void) AO_fetch_and_add1_full(&(x)->atomic))
# define atomic_add(x, v) ((void) AO_fetch_and_add_full(&(x)->atomic, (v)))
# define atomic_dec(x, v) ((void) AO_fetch_and_add_full(&(x)->atomic, -(v)))
# define atomic_dec_and_test(x) (AO_fetch_and_sub1_full(&(x)->atomic) == 1)
# define atomic_cmpxchg(x, oldv, newv) AO_compare_and_swap_full(&(x)->atomic, oldv, newv)
 
#endif
 
#if defined(__sun) && !defined(HAS_ATOMIC_OPS) /* Solaris & OpenSolaris */
 
#include <sys/atomic.h>
#define HAS_ATOMIC_OPS 1
 
typedef struct { uint_t atomic; } atomic_t;
 
# define atomic_read(x) (int) ((x)->atomic)
# define atomic_set(x, val) ((x)->atomic = (uint_t)(val))
# define atomic_inc(x) (atomic_inc_uint (&(x)->atomic))
# define atomic_dec_and_test(x) (atomic_dec_uint_nv(&(x)->atomic) == 0) /* _nv returns the new value */
# define atomic_add(x, v) (atomic_add_int(&(x)->atomic, (v)))
# define atomic_dec(x, v) (atomic_add_int(&(x)->atomic, -(v)))
# define atomic_cmpxchg(x, oldv, newv) atomic_cas_uint (&(x)->atomic, oldv, newv)
 
#endif
 
#if ! HAS_ATOMIC_OPS
#error libdrm requires atomic operations, please define them for your CPU/compiler.
#endif
 
#endif
/contrib/sdk/sources/libdrm/xf86drm.c
0,0 → 1,105
/**
* \file xf86drm.c
* User-level interface to DRM device
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
* \author Kevin E. Martin <martin@valinux.com>
*/
 
/*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
 
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <ctype.h>
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include <stdarg.h>
 
#include "xf86drm.h"
#include <kos32sys.h>
 
#ifndef DRM_MAJOR
#define DRM_MAJOR 226 /* Linux */
#endif
 
 
int drmGetMagic(int fd, drm_magic_t * magic)
{
drm_auth_t auth;
 
*magic = 1;
// if (drmIoctl(fd, DRM_IOCTL_GET_MAGIC, &auth))
// return -errno;
// *magic = auth.magic;
return 0;
}
 
void drmFreeVersion(drmVersionPtr v)
{
if (!v)
return;
// drmFree(v->name);
// drmFree(v->date);
// drmFree(v->desc);
free(v);
}
drmVersionPtr drmGetVersion(int fd)
{
drmVersionPtr v;
 
v = malloc(sizeof(*v));
if (v == NULL)
return NULL;
 
v->version_major = 1;
v->version_minor = 6;
v->version_patchlevel = 0;
v->name_len = 4;
v->name = "i915";
v->date_len = 8;
v->date = "20080730";
v->desc_len = 14;
v->desc = "Intel Graphics";
return v;
}
 
int drmIoctl(int fd, unsigned long request, void *arg)
{
ioctl_t io;
 
io.handle = fd;
io.io_code = request;
io.input = arg;
io.inp_size = 64;
io.output = NULL;
io.out_size = 0;
 
return call_service(&io);
}
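 
/*
 * Illustrative sketch, not in the original source: on KolibriOS every DRM
 * request funnels through the service call above. Querying the chipset id,
 * as get_pci_device_id() does in intel_bufmgr_gem.c, looks like this
 * (assuming the i915_drm.h definitions are in scope):
 */
#if 0
static int query_chipset_id(int fd)
{
int devid = 0;
drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID, .value = &devid };
 
if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
return -1;
return devid;
}
#endif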
/contrib/sdk/sources/libdrm/xf86drm.h
0,0 → 1,725
/**
* \file xf86drm.h
* OS-independent header for DRM user-level library interface.
*
* \author Rickard E. (Rik) Faith <faith@valinux.com>
*/
/*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
 
#ifndef _XF86DRM_H_
#define _XF86DRM_H_
 
#include <stdarg.h>
#include <sys/types.h>
#include <stdint.h>
#include <drm.h>
 
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
 
#ifndef DRM_MAX_MINOR
#define DRM_MAX_MINOR 16
#endif
 
#define DRM_IOCTL_NR(n) _IOC_NR(n)
#define DRM_IOC_VOID _IOC_NONE
#define DRM_IOC_READ _IOC_READ
#define DRM_IOC_WRITE _IOC_WRITE
#define DRM_IOC_READWRITE (_IOC_READ|_IOC_WRITE)
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
 
 
/* Defaults, if nothing set in xf86config */
#define DRM_DEV_UID 0
#define DRM_DEV_GID 0
/* Default /dev/dri directory permissions 0755 */
#define DRM_DEV_DIRMODE \
(S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH)
#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
 
#define DRM_DIR_NAME "/dev/dri"
#define DRM_DEV_NAME "%s/card%d"
#define DRM_CONTROL_DEV_NAME "%s/controlD%d"
#define DRM_PROC_NAME "/proc/dri/" /* For backward Linux compatibility */
 
#define DRM_ERR_NO_DEVICE (-1001)
#define DRM_ERR_NO_ACCESS (-1002)
#define DRM_ERR_NOT_ROOT (-1003)
#define DRM_ERR_INVALID (-1004)
#define DRM_ERR_NO_FD (-1005)
 
#define DRM_AGP_NO_HANDLE 0
 
typedef unsigned int drmSize, *drmSizePtr; /**< For mapped regions */
typedef void *drmAddress, **drmAddressPtr; /**< For mapped regions */
 
typedef struct _drmServerInfo {
int (*debug_print)(const char *format, va_list ap);
int (*load_module)(const char *name);
} drmServerInfo, *drmServerInfoPtr;
 
typedef struct drmHashEntry {
int fd;
void (*f)(int, void *, void *);
void *tagTable;
} drmHashEntry;
 
extern int drmIoctl(int fd, unsigned long request, void *arg);
extern void *drmGetHashTable(void);
extern drmHashEntry *drmGetEntry(int fd);
 
/**
* Driver version information.
*
* \sa drmGetVersion() and drmSetVersion().
*/
typedef struct _drmVersion {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
int name_len; /**< Length of name buffer */
char *name; /**< Name of driver */
int date_len; /**< Length of date buffer */
char *date; /**< User-space buffer to hold date */
int desc_len; /**< Length of desc buffer */
char *desc; /**< User-space buffer to hold desc */
} drmVersion, *drmVersionPtr;
 
typedef struct _drmStats {
unsigned long count; /**< Number of data */
struct {
unsigned long value; /**< Value from kernel */
const char *long_format; /**< Suggested format for long_name */
const char *long_name; /**< Long name for value */
const char *rate_format; /**< Suggested format for rate_name */
const char *rate_name; /**< Short name for value per second */
int isvalue; /**< True if value (vs. counter) */
const char *mult_names; /**< Multiplier names (e.g., "KGM") */
int mult; /**< Multiplier value (e.g., 1024) */
int verbose; /**< Suggest only in verbose output */
} data[15];
} drmStatsT;
 
 
/* All of these enums *MUST* match with the
kernel implementation -- so do *NOT*
change them! (The drmlib implementation
will just copy the flags instead of
translating them.) */
typedef enum {
DRM_FRAME_BUFFER = 0, /**< WC, no caching, no core dump */
DRM_REGISTERS = 1, /**< no caching, no core dump */
DRM_SHM = 2, /**< shared, cached */
DRM_AGP = 3, /**< AGP/GART */
DRM_SCATTER_GATHER = 4, /**< PCI scatter/gather */
DRM_CONSISTENT = 5 /**< PCI consistent */
} drmMapType;
 
typedef enum {
DRM_RESTRICTED = 0x0001, /**< Cannot be mapped to client-virtual */
DRM_READ_ONLY = 0x0002, /**< Read-only in client-virtual */
DRM_LOCKED = 0x0004, /**< Physical pages locked */
DRM_KERNEL = 0x0008, /**< Kernel requires access */
DRM_WRITE_COMBINING = 0x0010, /**< Use write-combining, if available */
DRM_CONTAINS_LOCK = 0x0020, /**< SHM page that contains lock */
DRM_REMOVABLE = 0x0040 /**< Removable mapping */
} drmMapFlags;
 
/**
* \warning These values *MUST* match drm.h
*/
typedef enum {
/** \name Flags for DMA buffer dispatch */
/*@{*/
DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
*
* \note the buffer may not yet have been
* processed by the hardware -- getting a
* hardware lock with the hardware quiescent
* will ensure that the buffer has been
* processed.
*/
DRM_DMA_WHILE_LOCKED = 0x02, /**< Dispatch while lock held */
DRM_DMA_PRIORITY = 0x04, /**< High priority dispatch */
/*@}*/
 
/** \name Flags for DMA buffer request */
/*@{*/
DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
/*@}*/
} drmDMAFlags;
 
typedef enum {
DRM_PAGE_ALIGN = 0x01,
DRM_AGP_BUFFER = 0x02,
DRM_SG_BUFFER = 0x04,
DRM_FB_BUFFER = 0x08,
DRM_PCI_BUFFER_RO = 0x10
} drmBufDescFlags;
 
typedef enum {
DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
DRM_LOCK_FLUSH_ALL = 0x08, /**< Flush all DMA queues first */
/* These *HALT* flags aren't supported yet
-- they will be used to support the
full-screen DGA-like mode. */
DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
} drmLockFlags;
 
typedef enum {
DRM_CONTEXT_PRESERVED = 0x01, /**< This context is preserved and
never swapped. */
DRM_CONTEXT_2DONLY = 0x02 /**< This context is for 2D rendering only. */
} drm_context_tFlags, *drm_context_tFlagsPtr;
 
typedef struct _drmBufDesc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
} drmBufDesc, *drmBufDescPtr;
 
typedef struct _drmBufInfo {
int count; /**< Number of buffers described in list */
drmBufDescPtr list; /**< List of buffer descriptions */
} drmBufInfo, *drmBufInfoPtr;
 
typedef struct _drmBuf {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
drmAddress address; /**< Address */
} drmBuf, *drmBufPtr;
 
/**
* Buffer mapping information.
*
* Used by drmMapBufs() and drmUnmapBufs() to store information about the
* mapped buffers.
*/
typedef struct _drmBufMap {
int count; /**< Number of buffers mapped */
drmBufPtr list; /**< Buffers */
} drmBufMap, *drmBufMapPtr;
 
typedef struct _drmLock {
volatile unsigned int lock;
char padding[60];
/* This is big enough for most current (and future?) architectures:
DEC Alpha: 32 bytes
Intel Merced: ?
Intel P5/PPro/PII/PIII: 32 bytes
Intel StrongARM: 32 bytes
Intel i386/i486: 16 bytes
MIPS: 32 bytes (?)
Motorola 68k: 16 bytes
Motorola PowerPC: 32 bytes
Sun SPARC: 32 bytes
*/
} drmLock, *drmLockPtr;
 
/**
* Indices here refer to the offset into
* list in drmBufInfo
*/
typedef struct _drmDMAReq {
drm_context_t context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int *send_list; /**< List of handles to buffers */
int *send_sizes; /**< Lengths of data to send, in bytes */
drmDMAFlags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size of buffers requested */
int *request_list; /**< Buffer information */
int *request_sizes; /**< Minimum acceptable sizes */
int granted_count; /**< Number of buffers granted at this size */
} drmDMAReq, *drmDMAReqPtr;
 
typedef struct _drmRegion {
drm_handle_t handle;
unsigned int offset;
drmSize size;
drmAddress map;
} drmRegion, *drmRegionPtr;
 
typedef struct _drmTextureRegion {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding; /**< Explicitly pad this out */
unsigned int age;
} drmTextureRegion, *drmTextureRegionPtr;
 
 
typedef enum {
DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
/* bits 1-6 are reserved for high crtcs */
DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
DRM_VBLANK_SIGNAL = 0x40000000 /* Send signal instead of blocking */
} drmVBlankSeqType;
#define DRM_VBLANK_HIGH_CRTC_SHIFT 1
 
typedef struct _drmVBlankReq {
drmVBlankSeqType type;
unsigned int sequence;
unsigned long signal;
} drmVBlankReq, *drmVBlankReqPtr;
 
typedef struct _drmVBlankReply {
drmVBlankSeqType type;
unsigned int sequence;
long tval_sec;
long tval_usec;
} drmVBlankReply, *drmVBlankReplyPtr;
 
typedef union _drmVBlank {
drmVBlankReq request;
drmVBlankReply reply;
} drmVBlank, *drmVBlankPtr;
 
typedef struct _drmSetVersion {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
} drmSetVersion, *drmSetVersionPtr;
 
#define __drm_dummy_lock(lock) (*(__volatile__ unsigned int *)lock)
 
#define DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
#define DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
 
#if defined(__GNUC__) && (__GNUC__ >= 2)
# if defined(__i386) || defined(__AMD64__) || defined(__x86_64__) || defined(__amd64__)
/* Reflect changes here to drmP.h */
#define DRM_CAS(lock,old,new,__ret) \
do { \
int __dummy; /* Can't mark eax as clobbered */ \
__asm__ __volatile__( \
"lock ; cmpxchg %4,%1\n\t" \
"setnz %0" \
: "=d" (__ret), \
"=m" (__drm_dummy_lock(lock)), \
"=a" (__dummy) \
: "2" (old), \
"r" (new)); \
} while (0)
 
#elif defined(__alpha__)
 
#define DRM_CAS(lock, old, new, ret) \
do { \
int tmp, old32; \
__asm__ __volatile__( \
" addl $31, %5, %3\n" \
"1: ldl_l %0, %2\n" \
" cmpeq %0, %3, %1\n" \
" beq %1, 2f\n" \
" mov %4, %0\n" \
" stl_c %0, %2\n" \
" beq %0, 3f\n" \
" mb\n" \
"2: cmpeq %1, 0, %1\n" \
".subsection 2\n" \
"3: br 1b\n" \
".previous" \
: "=&r"(tmp), "=&r"(ret), \
"=m"(__drm_dummy_lock(lock)), \
"=&r"(old32) \
: "r"(new), "r"(old) \
: "memory"); \
} while (0)
 
#elif defined(__sparc__)
 
#define DRM_CAS(lock,old,new,__ret) \
do { register unsigned int __old __asm("o0"); \
register unsigned int __new __asm("o1"); \
register volatile unsigned int *__lock __asm("o2"); \
__old = old; \
__new = new; \
__lock = (volatile unsigned int *)lock; \
__asm__ __volatile__( \
/*"cas [%2], %3, %0"*/ \
".word 0xd3e29008\n\t" \
/*"membar #StoreStore | #StoreLoad"*/ \
".word 0x8143e00a" \
: "=&r" (__new) \
: "0" (__new), \
"r" (__lock), \
"r" (__old) \
: "memory"); \
__ret = (__new != __old); \
} while(0)
 
#elif defined(__ia64__)
 
#ifdef __INTEL_COMPILER
/* this currently generates bad code (missing stop bits)... */
#include <ia64intrin.h>
 
#define DRM_CAS(lock,old,new,__ret) \
do { \
unsigned long __result, __old = (old) & 0xffffffff; \
__mf(); \
__result = _InterlockedCompareExchange_acq(&__drm_dummy_lock(lock), (new), __old);\
__ret = (__result) != (__old); \
/* __ret = (__sync_val_compare_and_swap(&__drm_dummy_lock(lock), \
(old), (new)) \
!= (old)); */\
} while (0)
 
#else
#define DRM_CAS(lock,old,new,__ret) \
do { \
unsigned int __result, __old = (old); \
__asm__ __volatile__( \
"mf\n" \
"mov ar.ccv=%2\n" \
";;\n" \
"cmpxchg4.acq %0=%1,%3,ar.ccv" \
: "=r" (__result), "=m" (__drm_dummy_lock(lock)) \
: "r" ((unsigned long)__old), "r" (new) \
: "memory"); \
__ret = (__result) != (__old); \
} while (0)
 
#endif
 
#elif defined(__powerpc__)
 
#define DRM_CAS(lock,old,new,__ret) \
do { \
__asm__ __volatile__( \
"sync;" \
"0: lwarx %0,0,%1;" \
" xor. %0,%3,%0;" \
" bne 1f;" \
" stwcx. %2,0,%1;" \
" bne- 0b;" \
"1: " \
"sync;" \
: "=&r"(__ret) \
: "r"(lock), "r"(new), "r"(old) \
: "cr0", "memory"); \
} while (0)
 
#endif /* architecture */
#endif /* __GNUC__ >= 2 */
 
#ifndef DRM_CAS
#define DRM_CAS(lock,old,new,ret) do { ret=1; } while (0) /* FAST LOCK FAILS */
#endif
 
#if defined(__alpha__)
#define DRM_CAS_RESULT(_result) long _result
#elif defined(__powerpc__)
#define DRM_CAS_RESULT(_result) int _result
#else
#define DRM_CAS_RESULT(_result) char _result
#endif
 
#define DRM_LIGHT_LOCK(fd,lock,context) \
do { \
DRM_CAS_RESULT(__ret); \
DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
if (__ret) drmGetLock(fd,context,0); \
} while(0)
 
/* This one counts fast locks -- for
benchmarking only. */
#define DRM_LIGHT_LOCK_COUNT(fd,lock,context,count) \
do { \
DRM_CAS_RESULT(__ret); \
DRM_CAS(lock,context,DRM_LOCK_HELD|context,__ret); \
if (__ret) drmGetLock(fd,context,0); \
else ++count; \
} while(0)
 
#define DRM_LOCK(fd,lock,context,flags) \
do { \
if (flags) drmGetLock(fd,context,flags); \
else DRM_LIGHT_LOCK(fd,lock,context); \
} while(0)
 
#define DRM_UNLOCK(fd,lock,context) \
do { \
DRM_CAS_RESULT(__ret); \
DRM_CAS(lock,DRM_LOCK_HELD|context,context,__ret); \
if (__ret) drmUnlock(fd,context); \
} while(0)
 
/* Simple spin locks */
#define DRM_SPINLOCK(spin,val) \
do { \
DRM_CAS_RESULT(__ret); \
do { \
DRM_CAS(spin,0,val,__ret); \
if (__ret) while ((spin)->lock); \
} while (__ret); \
} while(0)
 
#define DRM_SPINLOCK_TAKE(spin,val) \
do { \
DRM_CAS_RESULT(__ret); \
int cur; \
do { \
cur = (*spin).lock; \
DRM_CAS(spin,cur,val,__ret); \
} while (__ret); \
} while(0)
 
#define DRM_SPINLOCK_COUNT(spin,val,count,__ret) \
do { \
int __i; \
__ret = 1; \
for (__i = 0; __ret && __i < count; __i++) { \
DRM_CAS(spin,0,val,__ret); \
if (__ret) for (;__i < count && (spin)->lock; __i++); \
} \
} while(0)
 
#define DRM_SPINUNLOCK(spin,val) \
do { \
DRM_CAS_RESULT(__ret); \
if ((*spin).lock == val) { /* else server stole lock */ \
do { \
DRM_CAS(spin,val,0,__ret); \
} while (__ret); \
} \
} while(0)
 
 
 
/* General user-level programmer's API: unprivileged */
extern int drmAvailable(void);
extern int drmOpen(const char *name, const char *busid);
extern int drmOpenControl(int minor);
extern int drmClose(int fd);
extern drmVersionPtr drmGetVersion(int fd);
extern drmVersionPtr drmGetLibVersion(int fd);
extern int drmGetCap(int fd, uint64_t capability, uint64_t *value);
extern void drmFreeVersion(drmVersionPtr);
extern int drmGetMagic(int fd, drm_magic_t * magic);
extern char *drmGetBusid(int fd);
extern int drmGetInterruptFromBusID(int fd, int busnum, int devnum,
int funcnum);
extern int drmGetMap(int fd, int idx, drm_handle_t *offset,
drmSize *size, drmMapType *type,
drmMapFlags *flags, drm_handle_t *handle,
int *mtrr);
extern int drmGetClient(int fd, int idx, int *auth, int *pid,
int *uid, unsigned long *magic,
unsigned long *iocs);
extern int drmGetStats(int fd, drmStatsT *stats);
extern int drmSetInterfaceVersion(int fd, drmSetVersion *version);
extern int drmCommandNone(int fd, unsigned long drmCommandIndex);
extern int drmCommandRead(int fd, unsigned long drmCommandIndex,
void *data, unsigned long size);
extern int drmCommandWrite(int fd, unsigned long drmCommandIndex,
void *data, unsigned long size);
extern int drmCommandWriteRead(int fd, unsigned long drmCommandIndex,
void *data, unsigned long size);
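 
/*
 * Illustrative usage sketch (not part of the original header): the
 * drmCommand* wrappers above address a driver-private ioctl by its command
 * index. The index and payload struct below are hypothetical, not taken
 * from any real driver.
 */
#if 0
#define EXAMPLE_CMD_GETPARAM 0x06 /* hypothetical driver command index */
 
struct example_getparam {
    int param; /* in:  which parameter to query */
    int value; /* out: filled in by the kernel driver */
};
 
static int example_query(int fd, int param, int *value)
{
    struct example_getparam gp = { param, 0 };
    int ret = drmCommandWriteRead(fd, EXAMPLE_CMD_GETPARAM, &gp, sizeof(gp));
    if (ret == 0)
        *value = gp.value;
    return ret;
}
#endif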
 
/* General user-level programmer's API: X server (root) only */
extern void drmFreeBusid(const char *busid);
extern int drmSetBusid(int fd, const char *busid);
extern int drmAuthMagic(int fd, drm_magic_t magic);
extern int drmAddMap(int fd,
drm_handle_t offset,
drmSize size,
drmMapType type,
drmMapFlags flags,
drm_handle_t * handle);
extern int drmRmMap(int fd, drm_handle_t handle);
extern int drmAddContextPrivateMapping(int fd, drm_context_t ctx_id,
drm_handle_t handle);
 
extern int drmAddBufs(int fd, int count, int size,
drmBufDescFlags flags,
int agp_offset);
extern int drmMarkBufs(int fd, double low, double high);
extern int drmCreateContext(int fd, drm_context_t * handle);
extern int drmSetContextFlags(int fd, drm_context_t context,
drm_context_tFlags flags);
extern int drmGetContextFlags(int fd, drm_context_t context,
drm_context_tFlagsPtr flags);
extern int drmAddContextTag(int fd, drm_context_t context, void *tag);
extern int drmDelContextTag(int fd, drm_context_t context);
extern void *drmGetContextTag(int fd, drm_context_t context);
extern drm_context_t * drmGetReservedContextList(int fd, int *count);
extern void drmFreeReservedContextList(drm_context_t *);
extern int drmSwitchToContext(int fd, drm_context_t context);
extern int drmDestroyContext(int fd, drm_context_t handle);
extern int drmCreateDrawable(int fd, drm_drawable_t * handle);
extern int drmDestroyDrawable(int fd, drm_drawable_t handle);
extern int drmUpdateDrawableInfo(int fd, drm_drawable_t handle,
drm_drawable_info_type_t type,
unsigned int num, void *data);
extern int drmCtlInstHandler(int fd, int irq);
extern int drmCtlUninstHandler(int fd);
extern int drmSetClientCap(int fd, uint64_t capability,
uint64_t value);
 
/* General user-level programmer's API: authenticated client and/or X */
extern int drmMap(int fd,
drm_handle_t handle,
drmSize size,
drmAddressPtr address);
extern int drmUnmap(drmAddress address, drmSize size);
extern drmBufInfoPtr drmGetBufInfo(int fd);
extern drmBufMapPtr drmMapBufs(int fd);
extern int drmUnmapBufs(drmBufMapPtr bufs);
extern int drmDMA(int fd, drmDMAReqPtr request);
extern int drmFreeBufs(int fd, int count, int *list);
extern int drmGetLock(int fd,
drm_context_t context,
drmLockFlags flags);
extern int drmUnlock(int fd, drm_context_t context);
extern int drmFinish(int fd, int context, drmLockFlags flags);
extern int drmGetContextPrivateMapping(int fd, drm_context_t ctx_id,
drm_handle_t * handle);
 
/* AGP/GART support: X server (root) only */
extern int drmAgpAcquire(int fd);
extern int drmAgpRelease(int fd);
extern int drmAgpEnable(int fd, unsigned long mode);
extern int drmAgpAlloc(int fd, unsigned long size,
unsigned long type, unsigned long *address,
drm_handle_t *handle);
extern int drmAgpFree(int fd, drm_handle_t handle);
extern int drmAgpBind(int fd, drm_handle_t handle,
unsigned long offset);
extern int drmAgpUnbind(int fd, drm_handle_t handle);
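 
/*
 * Illustrative usage sketch (not part of the original header): the usual
 * ordering of the AGP calls above when a root-privileged server brings up
 * the aperture. Error handling is reduced to a single bail-out label.
 */
#if 0
static int example_agp_setup(int fd, unsigned long size)
{
    unsigned long address;
    drm_handle_t handle;
 
    if (drmAgpAcquire(fd))                   /* take ownership of AGP */
        return -1;
    if (drmAgpEnable(fd, drmAgpGetMode(fd))) /* enable at the current mode */
        goto fail;
    if (drmAgpAlloc(fd, size, 0, &address, &handle))
        goto fail;
    if (drmAgpBind(fd, handle, 0))           /* bind at aperture offset 0 */
        goto fail;
    return 0;
fail:
    drmAgpRelease(fd);
    return -1;
}
#endif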
 
/* AGP/GART info: authenticated client and/or X */
extern int drmAgpVersionMajor(int fd);
extern int drmAgpVersionMinor(int fd);
extern unsigned long drmAgpGetMode(int fd);
extern unsigned long drmAgpBase(int fd); /* Physical location */
extern unsigned long drmAgpSize(int fd); /* Bytes */
extern unsigned long drmAgpMemoryUsed(int fd);
extern unsigned long drmAgpMemoryAvail(int fd);
extern unsigned int drmAgpVendorId(int fd);
extern unsigned int drmAgpDeviceId(int fd);
 
/* PCI scatter/gather support: X server (root) only */
extern int drmScatterGatherAlloc(int fd, unsigned long size,
drm_handle_t *handle);
extern int drmScatterGatherFree(int fd, drm_handle_t handle);
 
extern int drmWaitVBlank(int fd, drmVBlankPtr vbl);
 
/* Support routines */
extern void drmSetServerInfo(drmServerInfoPtr info);
extern int drmError(int err, const char *label);
extern void *drmMalloc(int size);
extern void drmFree(void *pt);
 
/* Hash table routines */
extern void *drmHashCreate(void);
extern int drmHashDestroy(void *t);
extern int drmHashLookup(void *t, unsigned long key, void **value);
extern int drmHashInsert(void *t, unsigned long key, void *value);
extern int drmHashDelete(void *t, unsigned long key);
extern int drmHashFirst(void *t, unsigned long *key, void **value);
extern int drmHashNext(void *t, unsigned long *key, void **value);
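 
/*
 * Illustrative usage sketch (not part of the original header): a round
 * trip through the hash-table helpers above. drmHashLookup() returns 0
 * on a hit, so the negated test below means "found".
 */
#if 0
static void example_hash_roundtrip(void)
{
    void *table = drmHashCreate();
    static int payload = 42;
    void *value = NULL;
 
    drmHashInsert(table, 0x1234, &payload); /* key -> &payload */
    if (!drmHashLookup(table, 0x1234, &value)) {
        /* value now points at payload */
    }
    drmHashDelete(table, 0x1234);
    drmHashDestroy(table);
}
#endif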
 
/* PRNG routines */
extern void *drmRandomCreate(unsigned long seed);
extern int drmRandomDestroy(void *state);
extern unsigned long drmRandom(void *state);
extern double drmRandomDouble(void *state);
 
/* Skip list routines */
 
extern void *drmSLCreate(void);
extern int drmSLDestroy(void *l);
extern int drmSLLookup(void *l, unsigned long key, void **value);
extern int drmSLInsert(void *l, unsigned long key, void *value);
extern int drmSLDelete(void *l, unsigned long key);
extern int drmSLNext(void *l, unsigned long *key, void **value);
extern int drmSLFirst(void *l, unsigned long *key, void **value);
extern void drmSLDump(void *l);
extern int drmSLLookupNeighbors(void *l, unsigned long key,
unsigned long *prev_key, void **prev_value,
unsigned long *next_key, void **next_value);
 
extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened);
extern void drmCloseOnce(int fd);
extern void drmMsg(const char *format, ...);
 
extern int drmSetMaster(int fd);
extern int drmDropMaster(int fd);
 
#define DRM_EVENT_CONTEXT_VERSION 2
 
typedef struct _drmEventContext {
 
/* This struct is versioned so we can add more pointers if we
* add more events. */
int version;
 
void (*vblank_handler)(int fd,
unsigned int sequence,
unsigned int tv_sec,
unsigned int tv_usec,
void *user_data);
 
void (*page_flip_handler)(int fd,
unsigned int sequence,
unsigned int tv_sec,
unsigned int tv_usec,
void *user_data);
 
} drmEventContext, *drmEventContextPtr;
 
extern int drmHandleEvent(int fd, drmEventContextPtr evctx);
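 
/*
 * Illustrative usage sketch (not part of the original header): consuming
 * DRM events with the context struct above. Assumes <poll.h>; the handler
 * and loop names are hypothetical.
 */
#if 0
static void example_vblank(int fd, unsigned int sequence,
                           unsigned int tv_sec, unsigned int tv_usec,
                           void *user_data)
{
    /* ... react to the vblank event ... */
}
 
static void example_event_loop(int fd)
{
    drmEventContext evctx = {
        .version = DRM_EVENT_CONTEXT_VERSION,
        .vblank_handler = example_vblank,
        .page_flip_handler = NULL, /* unused in this sketch */
    };
    struct pollfd pfd = { .fd = fd, .events = POLLIN };
 
    while (poll(&pfd, 1, -1) > 0)
        drmHandleEvent(fd, &evctx); /* dispatches to the handlers above */
}
#endif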
 
extern char *drmGetDeviceNameFromFd(int fd);
 
extern int drmPrimeHandleToFD(int fd, uint32_t handle, uint32_t flags, int *prime_fd);
extern int drmPrimeFDToHandle(int fd, int prime_fd, uint32_t *handle);
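 
/*
 * Illustrative usage sketch (not part of the original header): exporting a
 * GEM handle as a PRIME file descriptor and importing it back, e.g. in
 * another process after passing the fd over a socket. DRM_CLOEXEC comes
 * from drm.h.
 */
#if 0
static int example_prime_export(int fd, uint32_t handle, int *prime_fd)
{
    /* DRM_CLOEXEC keeps the fd from leaking across exec() */
    return drmPrimeHandleToFD(fd, handle, DRM_CLOEXEC, prime_fd);
}
 
static int example_prime_import(int fd, int prime_fd, uint32_t *handle)
{
    return drmPrimeFDToHandle(fd, prime_fd, handle);
}
#endif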
 
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
 
#endif