/contrib/sdk/sources/newlib/libc/include/kos32sys.h |
---|
441,29 → 441,11 |
int stride; |
}; |
static inline void Blit(void *bitmap, int dst_x, int dst_y, |
void Blit(void *bitmap, int dst_x, int dst_y, |
int src_x, int src_y, int w, int h, |
int src_w, int src_h, int stride) |
{ |
volatile struct blit_call bc; |
int src_w, int src_h, int stride); |
bc.dstx = dst_x; |
bc.dsty = dst_y; |
bc.w = w; |
bc.h = h; |
bc.srcx = src_x; |
bc.srcy = src_y; |
bc.srcw = src_w; |
bc.srch = src_h; |
bc.stride = stride; |
bc.bitmap = bitmap; |
__asm__ __volatile__( |
"int $0x40" |
::"a"(73),"b"(0),"c"(&bc.dstx)); |
}; |
#endif |
/contrib/sdk/sources/libdrm/include/drm/drm.h |
---|
427,7 → 427,7 |
* DRM_IOCTL_UPDATE_DRAW ioctl argument type. |
*/ |
typedef enum { |
DRM_DRAWABLE_CLIPRECTS |
DRM_DRAWABLE_CLIPRECTS, |
} drm_drawable_info_type_t; |
struct drm_update_draw { |
459,6 → 459,8 |
enum drm_vblank_seq_type { |
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ |
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ |
/* bits 1-6 are reserved for high crtcs */ |
_DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e, |
_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */ |
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ |
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ |
465,6 → 467,7 |
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ |
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */ |
}; |
#define _DRM_VBLANK_HIGH_CRTC_SHIFT 1 |
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE) |
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \ |
619,14 → 622,6 |
*/ |
#define DRM_CLIENT_CAP_STEREO_3D 1 |
/** |
* DRM_CLIENT_CAP_UNIVERSAL_PLANES |
* |
* if set to 1, the DRM core will expose the full universal plane list |
* (including primary and cursor planes). |
*/ |
#define DRM_CLIENT_CAP_UNIVERSAL_PLANES 2 |
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */ |
struct drm_set_client_cap { |
__u64 capability; |
/contrib/sdk/sources/libdrm/include/drm/drm_fourcc.h |
---|
106,7 → 106,12 |
#define DRM_FORMAT_NV21 fourcc_code('N', 'V', '2', '1') /* 2x2 subsampled Cb:Cr plane */ |
#define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */ |
#define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */ |
#define DRM_FORMAT_NV24 fourcc_code('N', 'V', '2', '4') /* non-subsampled Cr:Cb plane */ |
#define DRM_FORMAT_NV42 fourcc_code('N', 'V', '4', '2') /* non-subsampled Cb:Cr plane */ |
/* special NV12 tiled format */ |
#define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */ |
/* |
* 3 plane YCbCr |
* index 0: Y plane, [7:0] Y |
/contrib/sdk/sources/libdrm/include/drm/drm_mode.h |
---|
173,6 → 173,9 |
#define DRM_MODE_ENCODER_TMDS 2 |
#define DRM_MODE_ENCODER_LVDS 3 |
#define DRM_MODE_ENCODER_TVDAC 4 |
#define DRM_MODE_ENCODER_VIRTUAL 5 |
#define DRM_MODE_ENCODER_DSI 6 |
#define DRM_MODE_ENCODER_DPMST 7 |
struct drm_mode_get_encoder { |
__u32 encoder_id; |
210,6 → 213,8 |
#define DRM_MODE_CONNECTOR_HDMIB 12 |
#define DRM_MODE_CONNECTOR_TV 13 |
#define DRM_MODE_CONNECTOR_eDP 14 |
#define DRM_MODE_CONNECTOR_VIRTUAL 15 |
#define DRM_MODE_CONNECTOR_DSI 16 |
struct drm_mode_get_connector { |
230,6 → 235,8 |
__u32 connection; |
__u32 mm_width, mm_height; /**< HxW in millimeters */ |
__u32 subpixel; |
__u32 pad; |
}; |
#define DRM_MODE_PROP_PENDING (1<<0) |
239,6 → 246,21 |
#define DRM_MODE_PROP_BLOB (1<<4) |
#define DRM_MODE_PROP_BITMASK (1<<5) /* bitmask of enumerated types */ |
/* non-extended types: legacy bitmask, one bit per type: */ |
#define DRM_MODE_PROP_LEGACY_TYPE ( \ |
DRM_MODE_PROP_RANGE | \ |
DRM_MODE_PROP_ENUM | \ |
DRM_MODE_PROP_BLOB | \ |
DRM_MODE_PROP_BITMASK) |
/* extended-types: rather than continue to consume a bit per type, |
* grab a chunk of the bits to use as integer type id. |
*/ |
#define DRM_MODE_PROP_EXTENDED_TYPE 0x0000ffc0 |
#define DRM_MODE_PROP_TYPE(n) ((n) << 6) |
#define DRM_MODE_PROP_OBJECT DRM_MODE_PROP_TYPE(1) |
#define DRM_MODE_PROP_SIGNED_RANGE DRM_MODE_PROP_TYPE(2) |
struct drm_mode_property_enum { |
__u64 value; |
char name[DRM_PROP_NAME_LEN]; |
262,15 → 284,6 |
__u32 connector_id; |
}; |
#define DRM_MODE_OBJECT_CRTC 0xcccccccc |
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 |
#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 |
#define DRM_MODE_OBJECT_MODE 0xdededede |
#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 |
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb |
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb |
#define DRM_MODE_OBJECT_PLANE 0xeeeeeeee |
struct drm_mode_obj_get_properties { |
__u64 props_ptr; |
__u64 prop_values_ptr; |
333,6 → 346,8 |
#define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 |
#define DRM_MODE_FB_DIRTY_FLAGS 0x03 |
#define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 |
/* |
* Mark a region of a framebuffer as dirty. |
* |
373,20 → 388,21 |
struct drm_mode_modeinfo mode; |
}; |
#define DRM_MODE_CURSOR_BO (1<<0) |
#define DRM_MODE_CURSOR_MOVE (1<<1) |
#define DRM_MODE_CURSOR_BO 0x01 |
#define DRM_MODE_CURSOR_MOVE 0x02 |
#define DRM_MODE_CURSOR_FLAGS 0x03 |
/* |
* depending on the value in flags diffrent members are used. |
* depending on the value in flags different members are used. |
* |
* CURSOR_BO uses |
* crtc |
* crtc_id |
* width |
* height |
* handle - if 0 turns the cursor of |
* handle - if 0 turns the cursor off |
* |
* CURSOR_MOVE uses |
* crtc |
* crtc_id |
* x |
* y |
*/ |
/contrib/sdk/sources/libdrm/include/drm/i915_drm.h |
---|
223,7 → 223,6 |
#define DRM_I915_GEM_GET_CACHING 0x30 |
#define DRM_I915_REG_READ 0x31 |
#define DRM_I915_GET_RESET_STATS 0x32 |
#define DRM_I915_GEM_USERPTR 0x33 |
#define DRM_IOCTL_I915_INIT |
#define DRM_IOCTL_I915_FLUSH |
1051,20 → 1050,6 |
__u32 pad; |
}; |
struct drm_i915_gem_userptr { |
__u64 user_ptr; |
__u64 user_size; |
__u32 flags; |
#define I915_USERPTR_READ_ONLY 0x1 |
#define I915_USERPTR_UNSYNCHRONIZED 0x80000000 |
/** |
* Returned handle for the object. |
* |
* Object handles are nonzero. |
*/ |
__u32 handle; |
}; |
struct drm_i915_mask { |
__u32 handle; |
__u32 width; |
/contrib/sdk/sources/libdrm/intel/intel_bufmgr.c |
---|
52,6 → 52,7 |
return bufmgr->bo_alloc(bufmgr, name, size, alignment); |
} |
#if 0 |
drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, |
const char *name, |
unsigned long size, |
59,6 → 60,7 |
{ |
return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment); |
} |
#endif |
drm_intel_bo * |
drm_intel_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name, |
/contrib/sdk/sources/libdrm/intel/intel_bufmgr.h |
---|
61,8 → 61,9 |
unsigned long align; |
/** |
* Deprecated field containing (possibly the low 32-bits of) the last |
* seen virtual card address. Use offset64 instead. |
* Last seen card virtual address (offset from the beginning of the |
* aperture) for the object. This should be used to fill relocation |
* entries when calling drm_intel_bo_emit_reloc() |
*/ |
unsigned long offset; |
83,13 → 84,6 |
* MM-specific handle for accessing object |
*/ |
int handle; |
/** |
* Last seen card virtual address (offset from the beginning of the |
* aperture) for the object. This should be used to fill relocation |
* entries when calling drm_intel_bo_emit_reloc() |
*/ |
uint64_t offset64; |
}; |
enum aub_dump_bmp_format { |
/contrib/sdk/sources/libdrm/intel/intel_bufmgr_gem.c |
---|
213,15 → 213,6 |
bool reusable; |
/** |
* Boolean of whether the GPU is definitely not accessing the buffer. |
* |
* This is only valid when reusable, since non-reusable |
buffers are those that have been shared with other |
* processes, so we don't know their state. |
*/ |
bool idle; |
/** |
* Size in bytes of this buffer and its relocation descendants. |
* |
* Used to avoid costly tree walking in |
392,7 → 383,7 |
(unsigned long long)bo_gem->relocs[j].offset, |
target_gem->gem_handle, |
target_gem->name, |
target_bo->offset64, |
target_bo->offset, |
bo_gem->relocs[j].delta); |
} |
} |
577,19 → 568,11 |
struct drm_i915_gem_busy busy; |
int ret; |
if (bo_gem->reusable && bo_gem->idle) |
return false; |
VG_CLEAR(busy); |
busy.handle = bo_gem->gem_handle; |
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy); |
if (ret == 0) { |
bo_gem->idle = !busy.busy; |
return busy.busy; |
} else { |
return false; |
} |
return (ret == 0 && busy.busy); |
} |
882,6 → 865,10 |
} |
} |
bo_gem = calloc(1, sizeof(*bo_gem)); |
if (!bo_gem) |
return NULL; |
VG_CLEAR(open_arg); |
open_arg.name = handle; |
ret = drmIoctl(bufmgr_gem->fd, |
890,29 → 877,11 |
if (ret != 0) { |
DBG("Couldn't reference %s handle 0x%08x: %s\n", |
name, handle, strerror(errno)); |
free(bo_gem); |
return NULL; |
} |
/* Now see if someone has used a prime handle to get this |
* object from the kernel before by looking through the list |
* again for a matching gem_handle |
*/ |
for (list = bufmgr_gem->named.next; |
list != &bufmgr_gem->named; |
list = list->next) { |
bo_gem = DRMLISTENTRY(drm_intel_bo_gem, list, name_list); |
if (bo_gem->gem_handle == open_arg.handle) { |
drm_intel_gem_bo_reference(&bo_gem->bo); |
return &bo_gem->bo; |
} |
} |
bo_gem = calloc(1, sizeof(*bo_gem)); |
if (!bo_gem) |
return NULL; |
bo_gem->bo.size = open_arg.size; |
bo_gem->bo.offset = 0; |
bo_gem->bo.offset64 = 0; |
bo_gem->bo.virtual = NULL; |
bo_gem->bo.bufmgr = bufmgr; |
bo_gem->name = name; |
1353,9 → 1322,6 |
int drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo) |
{ |
drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr; |
#ifdef HAVE_VALGRIND |
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
#endif |
int ret; |
/* If the CPU cache isn't coherent with the GTT, then use a |
1696,7 → 1662,7 |
target_bo_gem->gem_handle; |
bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains; |
bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain; |
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64; |
bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset; |
bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo; |
if (target_bo != bo) |
1847,12 → 1813,11 |
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo; |
/* Update the buffer offset */ |
if (bufmgr_gem->exec_objects[i].offset != bo->offset64) { |
if (bufmgr_gem->exec_objects[i].offset != bo->offset) { |
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", |
bo_gem->gem_handle, bo_gem->name, bo->offset64, |
bo_gem->gem_handle, bo_gem->name, bo->offset, |
(unsigned long long)bufmgr_gem->exec_objects[i]. |
offset); |
bo->offset64 = bufmgr_gem->exec_objects[i].offset; |
bo->offset = bufmgr_gem->exec_objects[i].offset; |
} |
} |
1868,11 → 1833,10 |
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; |
/* Update the buffer offset */ |
if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) { |
if (bufmgr_gem->exec2_objects[i].offset != bo->offset) { |
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n", |
bo_gem->gem_handle, bo_gem->name, bo->offset64, |
bo_gem->gem_handle, bo_gem->name, bo->offset, |
(unsigned long long)bufmgr_gem->exec2_objects[i].offset); |
bo->offset64 = bufmgr_gem->exec2_objects[i].offset; |
bo->offset = bufmgr_gem->exec2_objects[i].offset; |
} |
} |
2257,8 → 2221,6 |
drm_intel_bo *bo = bufmgr_gem->exec_bos[i]; |
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo; |
bo_gem->idle = false; |
/* Disconnect the buffer from the validate list */ |
bo_gem->validate_index = -1; |
bufmgr_gem->exec_bos[i] = NULL; |
2312,7 → 2274,6 |
if (ret != 0) |
return -errno; |
bo->offset64 = pin.offset; |
bo->offset = pin.offset; |
return 0; |
} |
2527,7 → 2488,6 |
bo_gem->global_name = flink.name; |
bo_gem->reusable = false; |
if (DRMLISTEMPTY(&bo_gem->name_list)) |
DRMLISTADDTAIL(&bo_gem->name_list, &bufmgr_gem->named); |
} |
2916,7 → 2876,7 |
aub_out(bufmgr_gem, 0); /* comment len */ |
/* Set up the GTT. The max we can handle is 256M */ |
aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | ((bufmgr_gem->gen >= 8 ? 6 : 5) - 2)); |
aub_out(bufmgr_gem, CMD_AUB_TRACE_HEADER_BLOCK | (5 - 2)); |
aub_out(bufmgr_gem, AUB_TRACE_MEMTYPE_NONLOCAL | 0 | AUB_TRACE_OP_DATA_WRITE); |
aub_out(bufmgr_gem, 0); /* subtype */ |
aub_out(bufmgr_gem, 0); /* offset */ |
2934,19 → 2894,15 |
drm_intel_context *context = NULL; |
int ret; |
context = calloc(1, sizeof(*context)); |
if (!context) |
return NULL; |
VG_CLEAR(create); |
ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create); |
if (ret != 0) { |
DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n", |
strerror(errno)); |
free(context); |
return NULL; |
} |
context = calloc(1, sizeof(*context)); |
context->ctx_id = create.ctx_id; |
context->bufmgr = bufmgr; |
3182,8 → 3138,8 |
bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2; |
bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc; |
bufmgr_gem->bufmgr.bo_alloc_for_render = |
drm_intel_gem_bo_alloc_for_render; |
// bufmgr_gem->bufmgr.bo_alloc_for_render = |
// drm_intel_gem_bo_alloc_for_render; |
bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled; |
bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference; |
bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference; |
/contrib/sdk/sources/libdrm/intel/intel_bufmgr_priv.h |
---|
56,10 → 56,10 |
* |
* This is otherwise the same as bo_alloc. |
*/ |
drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr, |
const char *name, |
unsigned long size, |
unsigned int alignment); |
// drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr, |
// const char *name, |
// unsigned long size, |
// unsigned int alignment); |
/** |
* Allocate a tiled buffer object. |
/contrib/sdk/sources/libdrm/intel/intel_chipset.h |
---|
160,11 → 160,6 |
#define PCI_CHIP_VALLEYVIEW_2 0x0f32 |
#define PCI_CHIP_VALLEYVIEW_3 0x0f33 |
#define PCI_CHIP_CHERRYVIEW_0 0x22b0 |
#define PCI_CHIP_CHERRYVIEW_1 0x22b1 |
#define PCI_CHIP_CHERRYVIEW_2 0x22b2 |
#define PCI_CHIP_CHERRYVIEW_3 0x22b3 |
#define IS_MOBILE(devid) ((devid) == PCI_CHIP_I855_GM || \ |
(devid) == PCI_CHIP_I915_GM || \ |
(devid) == PCI_CHIP_I945_GM || \ |
316,13 → 311,8 |
((devid & 0x000f) == BDW_WORKSTATION) ? 1 : \ |
((devid & 0x000f) == BDW_ULX) ? 1 : 0) |
#define IS_CHERRYVIEW(devid) ((devid) == PCI_CHIP_CHERRYVIEW_0 || \ |
(devid) == PCI_CHIP_CHERRYVIEW_1 || \ |
(devid) == PCI_CHIP_CHERRYVIEW_2 || \ |
(devid) == PCI_CHIP_CHERRYVIEW_3) |
#define IS_GEN8(devid) (IS_BROADWELL(devid) || \ |
IS_CHERRYVIEW(devid)) |
#define IS_GEN8(devid) IS_BROADWELL(devid) |
#define IS_9XX(dev) (IS_GEN3(dev) || \ |
IS_GEN4(dev) || \ |
/contrib/sdk/sources/libdrm/xf86drm.c |
---|
44,11 → 44,6 |
#include <time.h> |
#include <stdarg.h> |
/* Not all systems have MAP_FAILED defined */ |
#ifndef MAP_FAILED |
#define MAP_FAILED ((void *)-1) |
#endif |
#include "xf86drm.h" |
#include <kos32sys.h> |
/contrib/sdk/sources/libdrm/xf86drm.h |
---|
79,14 → 79,8 |
typedef unsigned int drmSize, *drmSizePtr; /**< For mapped regions */ |
typedef void *drmAddress, **drmAddressPtr; /**< For mapped regions */ |
#if (__GNUC__ >= 3) |
#define DRM_PRINTFLIKE(f, a) __attribute__ ((format(__printf__, f, a))) |
#else |
#define DRM_PRINTFLIKE(f, a) |
#endif |
typedef struct _drmServerInfo { |
int (*debug_print)(const char *format, va_list ap) DRM_PRINTFLIKE(1,0); |
int (*debug_print)(const char *format, va_list ap); |
int (*load_module)(const char *name); |
} drmServerInfo, *drmServerInfoPtr; |
690,7 → 684,7 |
extern int drmOpenOnce(void *unused, const char *BusID, int *newlyopened); |
extern void drmCloseOnce(int fd); |
extern void drmMsg(const char *format, ...) DRM_PRINTFLIKE(1, 2); |
extern void drmMsg(const char *format, ...); |
extern int drmSetMaster(int fd); |
extern int drmDropMaster(int fd); |