Subversion Repositories Kolibri OS

Compare Revisions

Rev 4357 → Rev 4358

/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/Android.mk
0,0 → 1,40
# Mesa 3-D graphics library
#
# Copyright (C) 2011 Chia-I Wu <olvaffe@gmail.com>
# Copyright (C) 2011 LunarG Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
 
LOCAL_PATH := $(call my-dir)
 
# get C_SOURCES
include $(LOCAL_PATH)/Makefile.sources
 
include $(CLEAR_VARS)
 
LOCAL_SRC_FILES := $(C_SOURCES)
 
LOCAL_C_INCLUDES := \
$(DRM_TOP) \
$(DRM_TOP)/include/drm
 
LOCAL_MODULE := libmesa_winsys_radeon
 
include $(GALLIUM_COMMON_MK)
include $(BUILD_STATIC_LIBRARY)
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/Makefile.am
0,0 → 1,11
include Makefile.sources
include $(top_srcdir)/src/gallium/Automake.inc
 
AM_CFLAGS = \
-I$(top_srcdir)/include \
$(GALLIUM_CFLAGS) \
$(RADEON_CFLAGS)
 
noinst_LTLIBRARIES = libradeonwinsys.la
 
libradeonwinsys_la_SOURCES = $(C_SOURCES)
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/Makefile.in
0,0 → 1,767
# Makefile.in generated by automake 1.14 from Makefile.am.
# @configure_input@
 
# Copyright (C) 1994-2013 Free Software Foundation, Inc.
 
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
 
@SET_MAKE@
 
VPATH = @srcdir@
am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
DIST_COMMON = $(srcdir)/Makefile.sources \
$(top_srcdir)/src/gallium/Automake.inc $(srcdir)/Makefile.in \
$(srcdir)/Makefile.am $(top_srcdir)/bin/depcomp
subdir = src/gallium/winsys/radeon/drm
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_prog_bison.m4 \
$(top_srcdir)/m4/ax_prog_cc_for_build.m4 \
$(top_srcdir)/m4/ax_prog_cxx_for_build.m4 \
$(top_srcdir)/m4/ax_prog_flex.m4 \
$(top_srcdir)/m4/ax_pthread.m4 \
$(top_srcdir)/m4/ax_python_module.m4 \
$(top_srcdir)/m4/libtool.m4 $(top_srcdir)/m4/ltoptions.m4 \
$(top_srcdir)/m4/ltsugar.m4 $(top_srcdir)/m4/ltversion.m4 \
$(top_srcdir)/m4/lt~obsolete.m4 $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
mkinstalldirs = $(install_sh) -d
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libradeonwinsys_la_LIBADD =
am__objects_1 = radeon_drm_bo.lo radeon_drm_cs.lo \
radeon_drm_cs_dump.lo radeon_drm_winsys.lo
am_libradeonwinsys_la_OBJECTS = $(am__objects_1)
libradeonwinsys_la_OBJECTS = $(am_libradeonwinsys_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
am__v_lt_0 = --silent
am__v_lt_1 =
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
DEFAULT_INCLUDES = -I.@am__isrc@
depcomp = $(SHELL) $(top_srcdir)/bin/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
$(AM_CFLAGS) $(CFLAGS)
AM_V_CC = $(am__v_CC_@AM_V@)
am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
am__v_CC_0 = @echo " CC " $@;
am__v_CC_1 =
CCLD = $(CC)
LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
$(AM_LDFLAGS) $(LDFLAGS) -o $@
AM_V_CCLD = $(am__v_CCLD_@AM_V@)
am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
am__v_CCLD_0 = @echo " CCLD " $@;
am__v_CCLD_1 =
SOURCES = $(libradeonwinsys_la_SOURCES)
DIST_SOURCES = $(libradeonwinsys_la_SOURCES)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates. Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
BEGIN { nonempty = 0; } \
{ items[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in items) print i; }; } \
'
# Make sure the list of sources is unique. This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
list='$(am__tagged_files)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BUILD_EXEEXT = @BUILD_EXEEXT@
BUILD_OBJEXT = @BUILD_OBJEXT@
CC = @CC@
CCAS = @CCAS@
CCASDEPMODE = @CCASDEPMODE@
CCASFLAGS = @CCASFLAGS@
CCDEPMODE = @CCDEPMODE@
CC_FOR_BUILD = @CC_FOR_BUILD@
CFLAGS = @CFLAGS@
CFLAGS_FOR_BUILD = @CFLAGS_FOR_BUILD@
CLANG_RESOURCE_DIR = @CLANG_RESOURCE_DIR@
CLOCK_LIB = @CLOCK_LIB@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CPPFLAGS_FOR_BUILD = @CPPFLAGS_FOR_BUILD@
CPP_FOR_BUILD = @CPP_FOR_BUILD@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXCPPFLAGS_FOR_BUILD = @CXXCPPFLAGS_FOR_BUILD@
CXXCPP_FOR_BUILD = @CXXCPP_FOR_BUILD@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CXXFLAGS_FOR_BUILD = @CXXFLAGS_FOR_BUILD@
CXX_FOR_BUILD = @CXX_FOR_BUILD@
CYGPATH_W = @CYGPATH_W@
DEFINES = @DEFINES@
DEFINES_FOR_BUILD = @DEFINES_FOR_BUILD@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DLOPEN_LIBS = @DLOPEN_LIBS@
DRI2PROTO_CFLAGS = @DRI2PROTO_CFLAGS@
DRI2PROTO_LIBS = @DRI2PROTO_LIBS@
DRIGL_CFLAGS = @DRIGL_CFLAGS@
DRIGL_LIBS = @DRIGL_LIBS@
DRI_DRIVER_INSTALL_DIR = @DRI_DRIVER_INSTALL_DIR@
DRI_DRIVER_SEARCH_DIR = @DRI_DRIVER_SEARCH_DIR@
DRI_LIB_DEPS = @DRI_LIB_DEPS@
DRI_PC_REQ_PRIV = @DRI_PC_REQ_PRIV@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGL_CFLAGS = @EGL_CFLAGS@
EGL_CLIENT_APIS = @EGL_CLIENT_APIS@
EGL_DRIVER_INSTALL_DIR = @EGL_DRIVER_INSTALL_DIR@
EGL_LIB_DEPS = @EGL_LIB_DEPS@
EGL_LIB_GLOB = @EGL_LIB_GLOB@
EGL_LIB_NAME = @EGL_LIB_NAME@
EGL_NATIVE_PLATFORM = @EGL_NATIVE_PLATFORM@
EGL_PLATFORMS = @EGL_PLATFORMS@
EGREP = @EGREP@
ELF_LIB = @ELF_LIB@
EXEEXT = @EXEEXT@
EXPAT_INCLUDES = @EXPAT_INCLUDES@
FGREP = @FGREP@
FREEDRENO_CFLAGS = @FREEDRENO_CFLAGS@
FREEDRENO_LIBS = @FREEDRENO_LIBS@
GALLIUM_DRI_LIB_DEPS = @GALLIUM_DRI_LIB_DEPS@
GALLIUM_PIPE_LOADER_DEFINES = @GALLIUM_PIPE_LOADER_DEFINES@
GALLIUM_PIPE_LOADER_LIBS = @GALLIUM_PIPE_LOADER_LIBS@
GALLIUM_PIPE_LOADER_XCB_CFLAGS = @GALLIUM_PIPE_LOADER_XCB_CFLAGS@
GALLIUM_PIPE_LOADER_XCB_LIBS = @GALLIUM_PIPE_LOADER_XCB_LIBS@
GBM_PC_LIB_PRIV = @GBM_PC_LIB_PRIV@
GBM_PC_REQ_PRIV = @GBM_PC_REQ_PRIV@
GLAPI_LIB_GLOB = @GLAPI_LIB_GLOB@
GLAPI_LIB_NAME = @GLAPI_LIB_NAME@
GLESv1_CM_LIB_DEPS = @GLESv1_CM_LIB_DEPS@
GLESv1_CM_LIB_GLOB = @GLESv1_CM_LIB_GLOB@
GLESv1_CM_LIB_NAME = @GLESv1_CM_LIB_NAME@
GLESv1_CM_PC_LIB_PRIV = @GLESv1_CM_PC_LIB_PRIV@
GLESv2_LIB_DEPS = @GLESv2_LIB_DEPS@
GLESv2_LIB_GLOB = @GLESv2_LIB_GLOB@
GLESv2_LIB_NAME = @GLESv2_LIB_NAME@
GLESv2_PC_LIB_PRIV = @GLESv2_PC_LIB_PRIV@
GLPROTO_CFLAGS = @GLPROTO_CFLAGS@
GLPROTO_LIBS = @GLPROTO_LIBS@
GLX_TLS = @GLX_TLS@
GL_LIB = @GL_LIB@
GL_LIB_DEPS = @GL_LIB_DEPS@
GL_LIB_GLOB = @GL_LIB_GLOB@
GL_LIB_NAME = @GL_LIB_NAME@
GL_PC_CFLAGS = @GL_PC_CFLAGS@
GL_PC_LIB_PRIV = @GL_PC_LIB_PRIV@
GL_PC_REQ_PRIV = @GL_PC_REQ_PRIV@
GREP = @GREP@
HAVE_XF86VIDMODE = @HAVE_XF86VIDMODE@
INDENT = @INDENT@
INDENT_FLAGS = @INDENT_FLAGS@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
INTEL_CFLAGS = @INTEL_CFLAGS@
INTEL_LIBS = @INTEL_LIBS@
LD = @LD@
LDFLAGS = @LDFLAGS@
LDFLAGS_FOR_BUILD = @LDFLAGS_FOR_BUILD@
LEX = @LEX@
LEXLIB = @LEXLIB@
LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
LIBCLC_INCLUDEDIR = @LIBCLC_INCLUDEDIR@
LIBCLC_LIBEXECDIR = @LIBCLC_LIBEXECDIR@
LIBDRM_CFLAGS = @LIBDRM_CFLAGS@
LIBDRM_LIBS = @LIBDRM_LIBS@
LIBDRM_XORG_CFLAGS = @LIBDRM_XORG_CFLAGS@
LIBDRM_XORG_LIBS = @LIBDRM_XORG_LIBS@
LIBKMS_XORG_CFLAGS = @LIBKMS_XORG_CFLAGS@
LIBKMS_XORG_LIBS = @LIBKMS_XORG_LIBS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LIBUDEV_CFLAGS = @LIBUDEV_CFLAGS@
LIBUDEV_LIBS = @LIBUDEV_LIBS@
LIB_DIR = @LIB_DIR@
LIPO = @LIPO@
LLVM_BINDIR = @LLVM_BINDIR@
LLVM_CFLAGS = @LLVM_CFLAGS@
LLVM_CONFIG = @LLVM_CONFIG@
LLVM_CPPFLAGS = @LLVM_CPPFLAGS@
LLVM_CXXFLAGS = @LLVM_CXXFLAGS@
LLVM_INCLUDEDIR = @LLVM_INCLUDEDIR@
LLVM_LDFLAGS = @LLVM_LDFLAGS@
LLVM_LIBDIR = @LLVM_LIBDIR@
LLVM_LIBS = @LLVM_LIBS@
LLVM_VERSION = @LLVM_VERSION@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAKE = @MAKE@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MESA_LLVM = @MESA_LLVM@
MKDIR_P = @MKDIR_P@
NM = @NM@
NMEDIT = @NMEDIT@
NOUVEAU_CFLAGS = @NOUVEAU_CFLAGS@
NOUVEAU_LIBS = @NOUVEAU_LIBS@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OPENCL_LIB_INSTALL_DIR = @OPENCL_LIB_INSTALL_DIR@
OSMESA_LIB = @OSMESA_LIB@
OSMESA_LIB_DEPS = @OSMESA_LIB_DEPS@
OSMESA_LIB_NAME = @OSMESA_LIB_NAME@
OSMESA_MESA_DEPS = @OSMESA_MESA_DEPS@
OSMESA_PC_LIB_PRIV = @OSMESA_PC_LIB_PRIV@
OSMESA_PC_REQ = @OSMESA_PC_REQ@
OSMESA_VERSION = @OSMESA_VERSION@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PERL = @PERL@
PKG_CONFIG = @PKG_CONFIG@
PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
POSIX_SHELL = @POSIX_SHELL@
PTHREAD_CC = @PTHREAD_CC@
PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
PTHREAD_LIBS = @PTHREAD_LIBS@
PYTHON2 = @PYTHON2@
RADEON_CFLAGS = @RADEON_CFLAGS@
RADEON_LIBS = @RADEON_LIBS@
RANLIB = @RANLIB@
SED = @SED@
SELINUX_LIBS = @SELINUX_LIBS@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VDPAU_CFLAGS = @VDPAU_CFLAGS@
VDPAU_LIBS = @VDPAU_LIBS@
VDPAU_LIB_INSTALL_DIR = @VDPAU_LIB_INSTALL_DIR@
VDPAU_MAJOR = @VDPAU_MAJOR@
VDPAU_MINOR = @VDPAU_MINOR@
VERSION = @VERSION@
VG_LIB_DEPS = @VG_LIB_DEPS@
VG_LIB_GLOB = @VG_LIB_GLOB@
VG_LIB_NAME = @VG_LIB_NAME@
VG_PC_LIB_PRIV = @VG_PC_LIB_PRIV@
VISIBILITY_CFLAGS = @VISIBILITY_CFLAGS@
VISIBILITY_CXXFLAGS = @VISIBILITY_CXXFLAGS@
WAYLAND_CFLAGS = @WAYLAND_CFLAGS@
WAYLAND_LIBS = @WAYLAND_LIBS@
WAYLAND_SCANNER = @WAYLAND_SCANNER@
X11_INCLUDES = @X11_INCLUDES@
XA_MAJOR = @XA_MAJOR@
XA_MINOR = @XA_MINOR@
XA_TINY = @XA_TINY@
XA_VERSION = @XA_VERSION@
XCB_DRI2_CFLAGS = @XCB_DRI2_CFLAGS@
XCB_DRI2_LIBS = @XCB_DRI2_LIBS@
XEXT_CFLAGS = @XEXT_CFLAGS@
XEXT_LIBS = @XEXT_LIBS@
XF86VIDMODE_CFLAGS = @XF86VIDMODE_CFLAGS@
XF86VIDMODE_LIBS = @XF86VIDMODE_LIBS@
XLIBGL_CFLAGS = @XLIBGL_CFLAGS@
XLIBGL_LIBS = @XLIBGL_LIBS@
XORG_CFLAGS = @XORG_CFLAGS@
XORG_DRIVER_INSTALL_DIR = @XORG_DRIVER_INSTALL_DIR@
XORG_LIBS = @XORG_LIBS@
XVMC_CFLAGS = @XVMC_CFLAGS@
XVMC_LIBS = @XVMC_LIBS@
XVMC_LIB_INSTALL_DIR = @XVMC_LIB_INSTALL_DIR@
XVMC_MAJOR = @XVMC_MAJOR@
XVMC_MINOR = @XVMC_MINOR@
YACC = @YACC@
YFLAGS = @YFLAGS@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CC_FOR_BUILD = @ac_ct_CC_FOR_BUILD@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_CXX_FOR_BUILD = @ac_ct_CXX_FOR_BUILD@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
ax_pthread_config = @ax_pthread_config@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target = @target@
target_alias = @target_alias@
target_cpu = @target_cpu@
target_os = @target_os@
target_vendor = @target_vendor@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
C_SOURCES := \
radeon_drm_bo.c \
radeon_drm_cs.c \
radeon_drm_cs_dump.c \
radeon_drm_winsys.c
 
GALLIUM_CFLAGS = \
-I$(top_srcdir)/include \
-I$(top_srcdir)/src/gallium/include \
-I$(top_srcdir)/src/gallium/auxiliary \
$(DEFINES)
 
AM_CFLAGS = \
-I$(top_srcdir)/include \
$(GALLIUM_CFLAGS) \
$(RADEON_CFLAGS)
 
noinst_LTLIBRARIES = libradeonwinsys.la
libradeonwinsys_la_SOURCES = $(C_SOURCES)
all: all-am
 
.SUFFIXES:
.SUFFIXES: .c .lo .o .obj
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/Makefile.sources $(top_srcdir)/src/gallium/Automake.inc $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/gallium/winsys/radeon/drm/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign src/gallium/winsys/radeon/drm/Makefile
.PRECIOUS: Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(srcdir)/Makefile.sources $(top_srcdir)/src/gallium/Automake.inc:
 
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
 
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
 
clean-noinstLTLIBRARIES:
-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
@list='$(noinst_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
 
libradeonwinsys.la: $(libradeonwinsys_la_OBJECTS) $(libradeonwinsys_la_DEPENDENCIES) $(EXTRA_libradeonwinsys_la_DEPENDENCIES)
$(AM_V_CCLD)$(LINK) $(libradeonwinsys_la_OBJECTS) $(libradeonwinsys_la_LIBADD) $(LIBS)
 
mostlyclean-compile:
-rm -f *.$(OBJEXT)
 
distclean-compile:
-rm -f *.tab.c
 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_bo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_cs.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_cs_dump.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_winsys.Plo@am__quote@
 
.c.o:
@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
 
.c.obj:
@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
 
.c.lo:
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
 
mostlyclean-libtool:
-rm -f *.lo
 
clean-libtool:
-rm -rf .libs _libs
 
ID: $(am__tagged_files)
$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-am
TAGS: tags
 
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
$(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: ctags-am
 
CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
$(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
 
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-am
 
cscopelist-am: $(am__tagged_files)
list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
esac; \
for i in $$list; do \
if test -f "$$i"; then \
echo "$(subdir)/$$i"; \
else \
echo "$$sdir/$$i"; \
fi; \
done >> $(top_builddir)/cscope.files
 
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
 
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES)
installdirs:
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
 
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
 
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
 
clean-generic:
 
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
 
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
 
clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
mostlyclean-am
 
distclean: distclean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-tags
 
dvi: dvi-am
 
dvi-am:
 
html: html-am
 
html-am:
 
info: info-am
 
info-am:
 
install-data-am:
 
install-dvi: install-dvi-am
 
install-dvi-am:
 
install-exec-am:
 
install-html: install-html-am
 
install-html-am:
 
install-info: install-info-am
 
install-info-am:
 
install-man:
 
install-pdf: install-pdf-am
 
install-pdf-am:
 
install-ps: install-ps-am
 
install-ps-am:
 
installcheck-am:
 
maintainer-clean: maintainer-clean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
 
mostlyclean: mostlyclean-am
 
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
 
pdf: pdf-am
 
pdf-am:
 
ps: ps-am
 
ps-am:
 
uninstall-am:
 
.MAKE: install-am install-strip
 
.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
clean-libtool clean-noinstLTLIBRARIES cscopelist-am ctags \
ctags-am distclean distclean-compile distclean-generic \
distclean-libtool distclean-tags distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dvi install-dvi-am install-exec \
install-exec-am install-html install-html-am install-info \
install-info-am install-man install-pdf install-pdf-am \
install-ps install-ps-am install-strip installcheck \
installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags tags-am uninstall uninstall-am
 
 
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/Makefile.sources
0,0 → 1,5
C_SOURCES := \
radeon_drm_bo.c \
radeon_drm_cs.c \
radeon_drm_cs_dump.c \
radeon_drm_winsys.c
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
0,0 → 1,1019
/*
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
 
#define _FILE_OFFSET_BITS 64
#include "radeon_drm_cs.h"
 
#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/u_simple_list.h"
#include "util/u_double_list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"
 
#include "state_tracker/drm_driver.h"
 
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
 
/*
* These are copied from radeon_drm; once an updated libdrm is released
* we should bump the configure.ac requirement for it and remove the
* following definitions.
*/
#define RADEON_BO_FLAGS_MACRO_TILE 1
#define RADEON_BO_FLAGS_MICRO_TILE 2
#define RADEON_BO_FLAGS_MICRO_TILE_SQUARE 0x20
 
#ifndef DRM_RADEON_GEM_WAIT
#define DRM_RADEON_GEM_WAIT 0x2b
 
#define RADEON_GEM_NO_WAIT 0x1
#define RADEON_GEM_USAGE_READ 0x2
#define RADEON_GEM_USAGE_WRITE 0x4
 
struct drm_radeon_gem_wait {
uint32_t handle;
uint32_t flags; /* one of RADEON_GEM_* */
};
 
#endif
 
#ifndef RADEON_VA_MAP
 
#define RADEON_VA_MAP 1
#define RADEON_VA_UNMAP 2
 
#define RADEON_VA_RESULT_OK 0
#define RADEON_VA_RESULT_ERROR 1
#define RADEON_VA_RESULT_VA_EXIST 2
 
#define RADEON_VM_PAGE_VALID (1 << 0)
#define RADEON_VM_PAGE_READABLE (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
#define RADEON_VM_PAGE_SYSTEM (1 << 3)
#define RADEON_VM_PAGE_SNOOPED (1 << 4)
 
struct drm_radeon_gem_va {
uint32_t handle;
uint32_t operation;
uint32_t vm_id;
uint32_t flags;
uint64_t offset;
};
 
#define DRM_RADEON_GEM_VA 0x2b
#endif
 
 
 
extern const struct pb_vtbl radeon_bo_vtbl;
 
 
static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
assert(bo->vtbl == &radeon_bo_vtbl);
return (struct radeon_bo *)bo;
}
 
struct radeon_bo_va_hole {
struct list_head list;
uint64_t offset;
uint64_t size;
};
 
struct radeon_bomgr {
/* Base class. */
struct pb_manager base;
 
/* Winsys. */
struct radeon_drm_winsys *rws;
 
/* List of buffer handles and its mutex. */
struct util_hash_table *bo_handles;
pipe_mutex bo_handles_mutex;
pipe_mutex bo_va_mutex;
 
/* is virtual address supported */
bool va;
uint64_t va_offset;
struct list_head va_holes;
};
 
static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
return (struct radeon_bomgr *)mgr;
}
 
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
struct radeon_bo *bo = NULL;
 
if (_buf->vtbl == &radeon_bo_vtbl) {
bo = radeon_bo(_buf);
} else {
struct pb_buffer *base_buf;
pb_size offset;
pb_get_base_buffer(_buf, &base_buf, &offset);
 
if (base_buf->vtbl == &radeon_bo_vtbl)
bo = radeon_bo(base_buf);
}
 
return bo;
}
 
static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
 
while (p_atomic_read(&bo->num_active_ioctls)) {
sched_yield();
}
 
/* XXX use this when it's ready */
/*if (bo->rws->info.drm_minor >= 12) {
struct drm_radeon_gem_wait args = {};
args.handle = bo->handle;
args.flags = usage;
while (drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
&args, sizeof(args)) == -EBUSY);
} else*/ {
struct drm_radeon_gem_wait_idle args;
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
&args, sizeof(args)) == -EBUSY);
}
}
 
static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
enum radeon_bo_usage usage)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
 
if (p_atomic_read(&bo->num_active_ioctls)) {
return TRUE;
}
 
/* XXX use this when it's ready */
/*if (bo->rws->info.drm_minor >= 12) {
struct drm_radeon_gem_wait args = {};
args.handle = bo->handle;
args.flags = usage | RADEON_GEM_NO_WAIT;
return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_WAIT,
&args, sizeof(args)) != 0;
} else*/ {
struct drm_radeon_gem_busy args;
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
&args, sizeof(args)) != 0;
}
}
 
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
struct radeon_bo_va_hole *hole, *n;
uint64_t offset = 0, waste = 0;
 
pipe_mutex_lock(mgr->bo_va_mutex);
/* first look for a hole */
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
offset = hole->offset;
waste = 0;
if (alignment) {
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
}
offset += waste;
if (offset >= (hole->offset + hole->size)) {
continue;
}
if (!waste && hole->size == size) {
offset = hole->offset;
list_del(&hole->list);
FREE(hole);
pipe_mutex_unlock(mgr->bo_va_mutex);
return offset;
}
if ((hole->size - waste) > size) {
if (waste) {
n = CALLOC_STRUCT(radeon_bo_va_hole);
n->size = waste;
n->offset = hole->offset;
list_add(&n->list, &hole->list);
}
hole->size -= (size + waste);
hole->offset += size + waste;
pipe_mutex_unlock(mgr->bo_va_mutex);
return offset;
}
if ((hole->size - waste) == size) {
hole->size = waste;
pipe_mutex_unlock(mgr->bo_va_mutex);
return offset;
}
}
 
offset = mgr->va_offset;
waste = 0;
if (alignment) {
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
}
if (waste) {
n = CALLOC_STRUCT(radeon_bo_va_hole);
n->size = waste;
n->offset = offset;
list_add(&n->list, &mgr->va_holes);
}
offset += waste;
mgr->va_offset += size + waste;
pipe_mutex_unlock(mgr->bo_va_mutex);
return offset;
}
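/* Illustrative worked example of the alignment handling above (an editorial
 * sketch, not from the original sources): the "waste" computation rounds a
 * hole's start up to the requested alignment.  With hole->offset = 0x5000
 * and alignment = 0x10000:
 *
 *     waste  = 0x5000 % 0x10000 = 0x5000
 *     waste  = 0x10000 - 0x5000 = 0xB000
 *     offset = 0x5000 + 0xB000  = 0x10000
 *
 * The 0xB000 wasted bytes are not lost: they are either re-inserted as a new
 * hole in front of the allocation or left in the shrunken hole, so the
 * allocator can hand them out again later.
 */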
 
static void radeon_bomgr_force_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
pipe_mutex_lock(mgr->bo_va_mutex);
if (va >= mgr->va_offset) {
if (va > mgr->va_offset) {
struct radeon_bo_va_hole *hole;
hole = CALLOC_STRUCT(radeon_bo_va_hole);
if (hole) {
hole->size = va - mgr->va_offset;
hole->offset = mgr->va_offset;
list_add(&hole->list, &mgr->va_holes);
}
}
mgr->va_offset = va + size;
} else {
struct radeon_bo_va_hole *hole, *n;
uint64_t hole_end, va_end;
 
/* Prune/free all holes that fall into the range
*/
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
hole_end = hole->offset + hole->size;
va_end = va + size;
if (hole->offset >= va_end || hole_end <= va)
continue;
if (hole->offset >= va && hole_end <= va_end) {
list_del(&hole->list);
FREE(hole);
continue;
}
if (hole->offset >= va)
hole->offset = va_end;
else
hole_end = va;
hole->size = hole_end - hole->offset;
}
}
pipe_mutex_unlock(mgr->bo_va_mutex);
}
 
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
struct radeon_bo_va_hole *hole;
 
pipe_mutex_lock(mgr->bo_va_mutex);
if ((va + size) == mgr->va_offset) {
mgr->va_offset = va;
/* Delete uppermost hole if it reaches the new top */
if (!LIST_IS_EMPTY(&mgr->va_holes)) {
hole = container_of(mgr->va_holes.next, hole, list);
if ((hole->offset + hole->size) == va) {
mgr->va_offset = hole->offset;
list_del(&hole->list);
FREE(hole);
}
}
} else {
struct radeon_bo_va_hole *next;
 
hole = container_of(&mgr->va_holes, hole, list);
LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
if (next->offset < va)
break;
hole = next;
}
 
if (&hole->list != &mgr->va_holes) {
/* Grow upper hole if it's adjacent */
if (hole->offset == (va + size)) {
hole->offset = va;
hole->size += size;
/* Merge lower hole if it's adjacent */
if (next != hole && &next->list != &mgr->va_holes &&
(next->offset + next->size) == va) {
next->size += hole->size;
list_del(&hole->list);
FREE(hole);
}
goto out;
}
}
 
/* Grow lower hole if it's adjacent */
if (next != hole && &next->list != &mgr->va_holes &&
(next->offset + next->size) == va) {
next->size += size;
goto out;
}
 
/* FIXME: on allocation failure we just lose virtual address space;
* maybe we should print a warning
*/
next = CALLOC_STRUCT(radeon_bo_va_hole);
if (next) {
next->size = size;
next->offset = va;
list_add(&next->list, &hole->list);
}
}
out:
pipe_mutex_unlock(mgr->bo_va_mutex);
}
 
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
struct radeon_bo *bo = radeon_bo(_buf);
struct radeon_bomgr *mgr = bo->mgr;
struct drm_gem_close args;
 
memset(&args, 0, sizeof(args));
 
if (bo->name) {
pipe_mutex_lock(bo->mgr->bo_handles_mutex);
util_hash_table_remove(bo->mgr->bo_handles,
(void*)(uintptr_t)bo->name);
pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
}
 
if (bo->ptr)
os_munmap(bo->ptr, bo->base.size);
 
/* Close object. */
args.handle = bo->handle;
drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
 
if (mgr->va) {
radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
}
 
pipe_mutex_destroy(bo->map_mutex);
 
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
bo->rws->allocated_vram -= align(bo->base.size, 4096);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
bo->rws->allocated_gtt -= align(bo->base.size, 4096);
FREE(bo);
}
 
void *radeon_bo_do_map(struct radeon_bo *bo)
{
struct drm_radeon_gem_mmap args = {0};
void *ptr;
 
/* Return the pointer if it's already mapped. */
if (bo->ptr)
return bo->ptr;
 
/* Map the buffer. */
pipe_mutex_lock(bo->map_mutex);
/* Return the pointer if it's already mapped (in case of a race). */
if (bo->ptr) {
pipe_mutex_unlock(bo->map_mutex);
return bo->ptr;
}
args.handle = bo->handle;
args.offset = 0;
args.size = (uint64_t)bo->base.size;
if (drmCommandWriteRead(bo->rws->fd,
DRM_RADEON_GEM_MMAP,
&args,
sizeof(args))) {
pipe_mutex_unlock(bo->map_mutex);
fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
bo, bo->handle);
return NULL;
}
 
ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
bo->rws->fd, args.addr_ptr);
if (ptr == MAP_FAILED) {
pipe_mutex_unlock(bo->map_mutex);
fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
return NULL;
}
bo->ptr = ptr;
pipe_mutex_unlock(bo->map_mutex);
 
return bo->ptr;
}
 
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
struct radeon_winsys_cs *rcs,
enum pipe_transfer_usage usage)
{
struct radeon_bo *bo = (struct radeon_bo*)buf;
struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
 
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
* if the GPU is using the buffer for read too
* (neither one is changing it).
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
return NULL;
}
 
if (radeon_bo_is_busy((struct pb_buffer*)bo,
RADEON_USAGE_WRITE)) {
return NULL;
}
} else {
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
return NULL;
}
 
if (radeon_bo_is_busy((struct pb_buffer*)bo,
RADEON_USAGE_READWRITE)) {
return NULL;
}
}
} else {
uint64_t time = os_time_get_nano();
 
if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
* if the GPU is using the buffer for read too
* (neither one is changing it).
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
cs->flush_cs(cs->flush_data, 0);
}
radeon_bo_wait((struct pb_buffer*)bo,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
if (cs) {
if (radeon_bo_is_referenced_by_cs(cs, bo)) {
cs->flush_cs(cs->flush_data, 0);
} else {
/* Try to avoid busy-waiting in radeon_bo_wait. */
if (p_atomic_read(&bo->num_active_ioctls))
radeon_drm_cs_sync_flush(rcs);
}
}
 
radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
}
 
bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
}
}
 
return radeon_bo_do_map(bo);
}
 
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
/* NOP */
}
 
static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
unsigned *offset)
{
*base_buf = buf;
*offset = 0;
}
 
static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
unsigned flags)
{
/* Always pinned */
return PIPE_OK;
}
 
static void radeon_bo_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
}
 
const struct pb_vtbl radeon_bo_vtbl = {
radeon_bo_destroy,
NULL, /* never called */
NULL, /* never called */
radeon_bo_validate,
radeon_bo_fence,
radeon_bo_get_base_buffer,
};
 
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
struct radeon_drm_winsys *rws = mgr->rws;
struct radeon_bo *bo;
struct drm_radeon_gem_create args;
struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
int r;
 
memset(&args, 0, sizeof(args));
 
assert(rdesc->initial_domains);
assert((rdesc->initial_domains &
~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
 
args.size = size;
args.alignment = desc->alignment;
args.initial_domain = rdesc->initial_domains;
 
if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
&args, sizeof(args))) {
fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
fprintf(stderr, "radeon: size : %d bytes\n", size);
fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
return NULL;
}
 
bo = CALLOC_STRUCT(radeon_bo);
if (!bo)
return NULL;
 
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = desc->alignment;
bo->base.usage = desc->usage;
bo->base.size = size;
bo->base.vtbl = &radeon_bo_vtbl;
bo->mgr = mgr;
bo->rws = mgr->rws;
bo->handle = args.handle;
bo->va = 0;
bo->initial_domain = rdesc->initial_domains;
pipe_mutex_init(bo->map_mutex);
 
if (mgr->va) {
struct drm_radeon_gem_va va;
 
bo->va_size = align(size, 4096);
bo->va = radeon_bomgr_find_va(mgr, bo->va_size, desc->alignment);
 
va.handle = bo->handle;
va.vm_id = 0;
va.operation = RADEON_VA_MAP;
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
RADEON_VM_PAGE_SNOOPED;
va.offset = bo->va;
r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
if (r && va.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
fprintf(stderr, "radeon: size : %d bytes\n", size);
fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
radeon_bo_destroy(&bo->base);
return NULL;
}
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
bo->va = va.offset;
radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
}
}
 
if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
rws->allocated_vram += align(size, 4096);
else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
rws->allocated_gtt += align(size, 4096);
 
return &bo->base;
}
 
static void radeon_bomgr_flush(struct pb_manager *mgr)
{
/* NOP */
}
 
/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
struct pb_buffer *_buf)
{
struct radeon_bo *bo = radeon_bo(_buf);
 
if (radeon_bo_is_referenced_by_any_cs(bo)) {
return TRUE;
}
 
if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
return TRUE;
}
 
return FALSE;
}
 
static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
util_hash_table_destroy(mgr->bo_handles);
pipe_mutex_destroy(mgr->bo_handles_mutex);
pipe_mutex_destroy(mgr->bo_va_mutex);
FREE(mgr);
}
 
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
 
static unsigned handle_hash(void *key)
{
return PTR_TO_UINT(key);
}
 
static int handle_compare(void *key1, void *key2)
{
return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
 
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
struct radeon_bomgr *mgr;
 
mgr = CALLOC_STRUCT(radeon_bomgr);
if (!mgr)
return NULL;
 
mgr->base.destroy = radeon_bomgr_destroy;
mgr->base.create_buffer = radeon_bomgr_create_bo;
mgr->base.flush = radeon_bomgr_flush;
mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;
 
mgr->rws = rws;
mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
pipe_mutex_init(mgr->bo_handles_mutex);
pipe_mutex_init(mgr->bo_va_mutex);
 
mgr->va = rws->info.r600_virtual_address;
mgr->va_offset = rws->info.r600_va_start;
list_inithead(&mgr->va_holes);
 
return &mgr->base;
}
 
static unsigned eg_tile_split(unsigned tile_split)
{
switch (tile_split) {
case 0: tile_split = 64; break;
case 1: tile_split = 128; break;
case 2: tile_split = 256; break;
case 3: tile_split = 512; break;
default:
case 4: tile_split = 1024; break;
case 5: tile_split = 2048; break;
case 6: tile_split = 4096; break;
}
return tile_split;
}
 
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
switch (eg_tile_split) {
case 64: return 0;
case 128: return 1;
case 256: return 2;
case 512: return 3;
default:
case 1024: return 4;
case 2048: return 5;
case 4096: return 6;
}
}
 
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
enum radeon_bo_layout *microtiled,
enum radeon_bo_layout *macrotiled,
unsigned *bankw, unsigned *bankh,
unsigned *tile_split,
unsigned *stencil_tile_split,
unsigned *mtilea)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
struct drm_radeon_gem_set_tiling args;
 
memset(&args, 0, sizeof(args));
 
args.handle = bo->handle;
 
drmCommandWriteRead(bo->rws->fd,
DRM_RADEON_GEM_GET_TILING,
&args,
sizeof(args));
 
*microtiled = RADEON_LAYOUT_LINEAR;
*macrotiled = RADEON_LAYOUT_LINEAR;
if (args.tiling_flags & RADEON_BO_FLAGS_MICRO_TILE)
*microtiled = RADEON_LAYOUT_TILED;
 
if (args.tiling_flags & RADEON_BO_FLAGS_MACRO_TILE)
*macrotiled = RADEON_LAYOUT_TILED;
if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
*bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
*bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
*tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
*stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
*mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
*tile_split = eg_tile_split(*tile_split);
}
}
 
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
struct radeon_winsys_cs *rcs,
enum radeon_bo_layout microtiled,
enum radeon_bo_layout macrotiled,
unsigned bankw, unsigned bankh,
unsigned tile_split,
unsigned stencil_tile_split,
unsigned mtilea,
uint32_t pitch)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct drm_radeon_gem_set_tiling args;
 
memset(&args, 0, sizeof(args));
 
/* Tiling determines how DRM treats the buffer data.
* We must flush CS when changing it if the buffer is referenced. */
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
cs->flush_cs(cs->flush_data, 0);
}
 
while (p_atomic_read(&bo->num_active_ioctls)) {
sched_yield();
}
 
if (microtiled == RADEON_LAYOUT_TILED)
args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE;
else if (microtiled == RADEON_LAYOUT_SQUARETILED)
args.tiling_flags |= RADEON_BO_FLAGS_MICRO_TILE_SQUARE;
 
if (macrotiled == RADEON_LAYOUT_TILED)
args.tiling_flags |= RADEON_BO_FLAGS_MACRO_TILE;
 
args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
RADEON_TILING_EG_BANKW_SHIFT;
args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
RADEON_TILING_EG_BANKH_SHIFT;
if (tile_split) {
args.tiling_flags |= (eg_tile_split_rev(tile_split) &
RADEON_TILING_EG_TILE_SPLIT_MASK) <<
RADEON_TILING_EG_TILE_SPLIT_SHIFT;
}
args.tiling_flags |= (stencil_tile_split &
RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
 
args.handle = bo->handle;
args.pitch = pitch;
 
drmCommandWriteRead(bo->rws->fd,
DRM_RADEON_GEM_SET_TILING,
&args,
sizeof(args));
}
 
static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
{
/* return radeon_bo. */
return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}
 
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
unsigned size,
unsigned alignment,
boolean use_reusable_pool,
enum radeon_bo_domain domain)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_bo_desc desc;
struct pb_manager *provider;
struct pb_buffer *buffer;
 
memset(&desc, 0, sizeof(desc));
desc.base.alignment = alignment;
 
/* Additional criteria for the cache manager. */
desc.base.usage = domain;
desc.initial_domains = domain;
 
/* Assign a buffer manager. */
if (use_reusable_pool)
provider = ws->cman;
else
provider = ws->kman;
 
buffer = provider->create_buffer(provider, size, &desc.base);
if (!buffer)
return NULL;
 
return (struct pb_buffer*)buffer;
}
 
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
struct winsys_handle *whandle,
unsigned *stride)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_bo *bo;
struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
struct drm_gem_open open_arg = {};
int r;
 
memset(&open_arg, 0, sizeof(open_arg));
 
/* We must maintain a list of pairs <handle, bo>, so that we always return
* the same BO for one particular handle. If we didn't do that and created
* more than one BO for the same handle and then relocated them in a CS,
* we would hit a deadlock in the kernel.
*
* The list of pairs is guarded by a mutex, of course. */
pipe_mutex_lock(mgr->bo_handles_mutex);
 
/* First check if there already is an existing bo for the handle. */
bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)whandle->handle);
if (bo) {
/* Increase the refcount. */
struct pb_buffer *b = NULL;
pb_reference(&b, &bo->base);
goto done;
}
 
/* There isn't, create a new one. */
bo = CALLOC_STRUCT(radeon_bo);
if (!bo) {
goto fail;
}
 
/* Open the BO. */
open_arg.name = whandle->handle;
if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
FREE(bo);
goto fail;
}
bo->handle = open_arg.handle;
bo->name = whandle->handle;
 
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = 0;
bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
bo->base.size = open_arg.size;
bo->base.vtbl = &radeon_bo_vtbl;
bo->mgr = mgr;
bo->rws = mgr->rws;
bo->va = 0;
pipe_mutex_init(bo->map_mutex);
 
util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)whandle->handle, bo);
 
done:
pipe_mutex_unlock(mgr->bo_handles_mutex);
 
if (stride)
*stride = whandle->stride;
 
if (mgr->va && !bo->va) {
struct drm_radeon_gem_va va;
 
bo->va_size = ((bo->base.size + 4095) & ~4095);
bo->va = radeon_bomgr_find_va(mgr, bo->va_size, 1 << 20);
 
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
va.vm_id = 0;
va.offset = bo->va;
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
RADEON_VM_PAGE_SNOOPED;
va.offset = bo->va;
r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
if (r && va.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to assign virtual address space\n");
radeon_bo_destroy(&bo->base);
return NULL;
}
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
radeon_bomgr_free_va(mgr, bo->va, bo->va_size);
bo->va = va.offset;
radeon_bomgr_force_va(mgr, bo->va, bo->va_size);
}
}
 
ws->allocated_vram += align(open_arg.size, 4096);
bo->initial_domain = RADEON_DOMAIN_VRAM;
 
return (struct pb_buffer*)bo;
 
fail:
pipe_mutex_unlock(mgr->bo_handles_mutex);
return NULL;
}
 
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
unsigned stride,
struct winsys_handle *whandle)
{
struct drm_gem_flink flink;
struct radeon_bo *bo = get_radeon_bo(buffer);
 
memset(&flink, 0, sizeof(flink));
 
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
if (!bo->flinked) {
flink.handle = bo->handle;
 
if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
return FALSE;
}
 
bo->flinked = TRUE;
bo->flink = flink.name;
 
pipe_mutex_lock(bo->mgr->bo_handles_mutex);
util_hash_table_set(bo->mgr->bo_handles, (void*)(uintptr_t)bo->flink, bo);
pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
}
whandle->handle = bo->flink;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
whandle->handle = bo->handle;
}
 
whandle->stride = stride;
return TRUE;
}
 
static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
return ((struct radeon_bo*)buf)->va;
}
 
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
ws->base.buffer_set_tiling = radeon_bo_set_tiling;
ws->base.buffer_get_tiling = radeon_bo_get_tiling;
ws->base.buffer_map = radeon_bo_map;
ws->base.buffer_unmap = radeon_bo_unmap;
ws->base.buffer_wait = radeon_bo_wait;
ws->base.buffer_is_busy = radeon_bo_is_busy;
ws->base.buffer_create = radeon_winsys_bo_create;
ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
}
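/* A minimal usage sketch (illustrative only, not part of the file above):
 * allocating, mapping and releasing a buffer through the entry points wired
 * up in radeon_bomgr_init_functions().  The function name and the chosen
 * size, alignment and domain are arbitrary assumptions for illustration.
 */
static void example_winsys_buffer_roundtrip(struct radeon_drm_winsys *ws)
{
    /* Allocate a 64 KiB, page-aligned buffer in GTT via the cache manager. */
    struct pb_buffer *buf =
        ws->base.buffer_create(&ws->base, 64 * 1024, 4096,
                               TRUE, RADEON_DOMAIN_GTT);
    struct radeon_winsys_cs_handle *handle;
    uint32_t *ptr;

    if (!buf)
        return;

    /* Mapping is done on the CS handle, not on the pb_buffer itself. */
    handle = ws->base.buffer_get_cs_handle(buf);
    ptr = ws->base.buffer_map(handle, NULL, PIPE_TRANSFER_WRITE);
    if (ptr) {
        ptr[0] = 0x12345678;           /* CPU write through the GEM mmap */
        ws->base.buffer_unmap(handle); /* no-op in this winsys, kept for symmetry */
    }

    pb_reference(&buf, NULL);          /* drop the reference; the BO is destroyed */
}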
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_drm_bo.h
0,0 → 1,84
/*
* Copyright © 2008 Jérôme Glisse
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <glisse@freedesktop.org>
* Marek Olšák <maraeo@gmail.com>
*/
#ifndef RADEON_DRM_BO_H
#define RADEON_DRM_BO_H
 
#include "radeon_drm_winsys.h"
#include "pipebuffer/pb_bufmgr.h"
#include "os/os_thread.h"
 
struct radeon_bomgr;
 
struct radeon_bo_desc {
struct pb_desc base;
 
unsigned initial_domains;
};
 
struct radeon_bo {
struct pb_buffer base;
 
struct radeon_bomgr *mgr;
struct radeon_drm_winsys *rws;
 
void *ptr;
pipe_mutex map_mutex;
 
uint32_t handle;
uint32_t name;
uint64_t va;
uint64_t va_size;
enum radeon_bo_domain initial_domain;
 
/* how many command streams is this bo referenced in? */
int num_cs_references;
 
/* how many command streams, which are being emitted in a separate
* thread, is this bo referenced in? */
int num_active_ioctls;
 
boolean flinked;
uint32_t flink;
};
 
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws);
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws);
 
static INLINE
void radeon_bo_reference(struct radeon_bo **dst, struct radeon_bo *src)
{
pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}
 
void *radeon_bo_do_map(struct radeon_bo *bo);
 
#endif
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
0,0 → 1,644
/*
* Copyright © 2008 Jérôme Glisse
* Copyright © 2010 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*
* Based on work from libdrm_radeon by:
* Aapo Tahkola <aet@rasterburn.org>
* Nicolai Haehnle <prefect_@gmx.net>
* Jérôme Glisse <glisse@freedesktop.org>
*/
 
/*
This file replaces libdrm's radeon_cs_gem with our own implementation.
It's optimized specifically for Radeon DRM.
Reloc writes and space checking are faster and simpler than their
counterparts in libdrm (the time complexity of all the functions
is O(1) in nearly all scenarios, thanks to hashing).
 
It works like this:
 
cs_add_reloc(cs, buf, read_domain, write_domain) adds a new relocation and
also adds the size of 'buf' to the used_gart and used_vram winsys variables
based on the domains, which are simply or'd for the accounting purposes.
The adding is skipped if the reloc is already present in the list, but it
accounts for any newly-referenced domains.
 
cs_validate is then called, which just checks:
used_vram/gart < vram/gart_size * 0.8
The 0.8 number allows for some memory fragmentation. If the validation
fails, the pipe driver flushes the CS and tries to do the validation again,
i.e. it validates only that one operation. If it fails again, it drops
the operation on the floor and prints some nasty message to stderr.
(done in the pipe driver)
 
cs_write_reloc(cs, buf) just writes a reloc that has been added using
cs_add_reloc. The read_domain and write_domain parameters have been removed,
because we already specify them in cs_add_reloc.
*/
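 
/*
 Illustrative pseudocode of the flow described above (a sketch only; the
 names mirror the description, not necessarily the real entry points):

     cs_add_reloc(cs, buf, read_domain, write_domain);  // O(1); bumps used_gart/used_vram
     if (!cs_validate(cs)) {                            // used_vram/gart < vram/gart_size * 0.8 ?
         flush_cs(cs);                                  // submit what has been queued so far
         cs_add_reloc(cs, buf, read_domain, write_domain); // re-add for the retried operation
         if (!cs_validate(cs))
             drop_operation_and_warn();                 // pipe driver gives up on this one operation
     }
     ... emit command packets referencing buf ...
     cs_write_reloc(cs, buf);                           // domains already recorded by cs_add_reloc
*/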
 
#include "radeon_drm_cs.h"
 
#include "util/u_memory.h"
 
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>
 
/*
* These are copied from radeon_drm.h; once an updated libdrm is released,
* we should bump the configure.ac requirement for it and remove the
* following defines.
*/
#ifndef RADEON_CHUNK_ID_FLAGS
#define RADEON_CHUNK_ID_FLAGS 0x03
 
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
#endif
 
#ifndef RADEON_CS_USE_VM
#define RADEON_CS_USE_VM 0x02
/* The second dword of RADEON_CHUNK_ID_FLAGS is a uint32 that sets the ring type */
#define RADEON_CS_RING_GFX 0
#define RADEON_CS_RING_COMPUTE 1
#endif
 
#ifndef RADEON_CS_RING_DMA
#define RADEON_CS_RING_DMA 2
#endif
 
#ifndef RADEON_CS_RING_UVD
#define RADEON_CS_RING_UVD 3
#endif
 
#ifndef RADEON_CS_END_OF_FRAME
#define RADEON_CS_END_OF_FRAME 0x04
#endif
 
 
#define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))
 
static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
struct radeon_drm_winsys *ws)
{
csc->fd = ws->fd;
csc->nrelocs = 512;
csc->relocs_bo = (struct radeon_bo**)
CALLOC(1, csc->nrelocs * sizeof(struct radeon_bo*));
if (!csc->relocs_bo) {
return FALSE;
}
 
csc->relocs = (struct drm_radeon_cs_reloc*)
CALLOC(1, csc->nrelocs * sizeof(struct drm_radeon_cs_reloc));
if (!csc->relocs) {
FREE(csc->relocs_bo);
return FALSE;
}
 
csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
csc->chunks[0].length_dw = 0;
csc->chunks[0].chunk_data = (uint64_t)(uintptr_t)csc->buf;
csc->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
csc->chunks[1].length_dw = 0;
csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
csc->chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
csc->chunks[2].length_dw = 2;
csc->chunks[2].chunk_data = (uint64_t)(uintptr_t)&csc->flags;
 
csc->chunk_array[0] = (uint64_t)(uintptr_t)&csc->chunks[0];
csc->chunk_array[1] = (uint64_t)(uintptr_t)&csc->chunks[1];
csc->chunk_array[2] = (uint64_t)(uintptr_t)&csc->chunks[2];
 
csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;
return TRUE;
}
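
/* The resulting layout handed to the CS ioctl by
 * radeon_drm_cs_emit_ioctl_oneshot() is:
 *
 *   csc->cs.chunks -> chunk_array[3] -> chunks[0]: IB     -> csc->buf
 *                                       chunks[1]: RELOCS -> csc->relocs
 *                                       chunks[2]: FLAGS  -> csc->flags (2 dwords)
 *
 * csc->cs.num_chunks (2 or 3, set at flush time) selects whether the FLAGS
 * chunk is actually passed to the kernel.
 */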
 
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
unsigned i;
 
for (i = 0; i < csc->crelocs; i++) {
p_atomic_dec(&csc->relocs_bo[i]->num_cs_references);
radeon_bo_reference(&csc->relocs_bo[i], NULL);
}
 
csc->crelocs = 0;
csc->validated_crelocs = 0;
csc->chunks[0].length_dw = 0;
csc->chunks[1].length_dw = 0;
csc->used_gart = 0;
csc->used_vram = 0;
memset(csc->is_handle_added, 0, sizeof(csc->is_handle_added));
}
 
static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
{
radeon_cs_context_cleanup(csc);
FREE(csc->relocs_bo);
FREE(csc->relocs);
}
 
 
static struct radeon_winsys_cs *radeon_drm_cs_create(struct radeon_winsys *rws,
enum ring_type ring_type,
struct radeon_winsys_cs_handle *trace_buf)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_drm_cs *cs;
 
cs = CALLOC_STRUCT(radeon_drm_cs);
if (!cs) {
return NULL;
}
pipe_semaphore_init(&cs->flush_completed, 0);
 
cs->ws = ws;
cs->trace_buf = (struct radeon_bo*)trace_buf;
 
if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
FREE(cs);
return NULL;
}
if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
radeon_destroy_cs_context(&cs->csc1);
FREE(cs);
return NULL;
}
 
/* Set the first command buffer as current. */
cs->csc = &cs->csc1;
cs->cst = &cs->csc2;
cs->base.buf = cs->csc->buf;
cs->base.ring_type = ring_type;
 
p_atomic_inc(&ws->num_cs);
return &cs->base;
}
 
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
 
static INLINE void update_reloc_domains(struct drm_radeon_cs_reloc *reloc,
enum radeon_bo_domain rd,
enum radeon_bo_domain wd,
enum radeon_bo_domain *added_domains)
{
*added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);
 
reloc->read_domains |= rd;
reloc->write_domain |= wd;
}
 
int radeon_get_reloc(struct radeon_cs_context *csc, struct radeon_bo *bo)
{
struct drm_radeon_cs_reloc *reloc;
unsigned i;
unsigned hash = bo->handle & (sizeof(csc->is_handle_added)-1);
 
if (csc->is_handle_added[hash]) {
i = csc->reloc_indices_hashlist[hash];
reloc = &csc->relocs[i];
if (reloc->handle == bo->handle) {
return i;
}
 
/* Hash collision, look for the BO in the list of relocs linearly. */
for (i = csc->crelocs; i != 0;) {
--i;
reloc = &csc->relocs[i];
if (reloc->handle == bo->handle) {
/* Put this reloc in the hash list.
* This will prevent additional hash collisions if there are
* several consecutive get_reloc calls for the same buffer.
*
* Example: Assuming buffers A,B,C collide in the hash list,
* the following sequence of relocs:
* AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
* will collide here: ^ and here: ^,
* meaning that we should get very few collisions in the end. */
csc->reloc_indices_hashlist[hash] = i;
/*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
return i;
}
}
}
 
return -1;
}
 
static unsigned radeon_add_reloc(struct radeon_drm_cs *cs,
struct radeon_bo *bo,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
enum radeon_bo_domain *added_domains)
{
struct radeon_cs_context *csc = cs->csc;
struct drm_radeon_cs_reloc *reloc;
unsigned hash = bo->handle & (sizeof(csc->is_handle_added)-1);
enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
bool update_hash = TRUE;
int i;
 
*added_domains = 0;
if (csc->is_handle_added[hash]) {
i = csc->reloc_indices_hashlist[hash];
reloc = &csc->relocs[i];
if (reloc->handle != bo->handle) {
/* Hash collision, look for the BO in the list of relocs linearly. */
for (i = csc->crelocs - 1; i >= 0; i--) {
reloc = &csc->relocs[i];
if (reloc->handle == bo->handle) {
/*printf("write_reloc collision, hash: %i, handle: %i\n", hash, bo->handle);*/
break;
}
}
}
 
if (i >= 0) {
/* On the DMA ring we need to emit one relocation per use of the BO,
* so each time this function is called we add the BO to the
* relocation buffer again.
*
* Do not update the hash table for the DMA ring, so that the hash always
* points to the first relocation of the BO, which is the one the kernel
* uses for memory placement. The following relocations are ignored for
* placement, but the kernel still uses them to patch the command stream
* with the proper buffer offsets.
*/
update_hash = FALSE;
update_reloc_domains(reloc, rd, wd, added_domains);
if (cs->base.ring_type != RING_DMA) {
csc->reloc_indices_hashlist[hash] = i;
return i;
}
}
}
 
/* New relocation, check if the backing array is large enough. */
if (csc->crelocs >= csc->nrelocs) {
uint32_t size;
csc->nrelocs += 10;
 
size = csc->nrelocs * sizeof(struct radeon_bo*);
csc->relocs_bo = realloc(csc->relocs_bo, size);
 
size = csc->nrelocs * sizeof(struct drm_radeon_cs_reloc);
csc->relocs = realloc(csc->relocs, size);
 
csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
}
 
/* Initialize the new relocation. */
csc->relocs_bo[csc->crelocs] = NULL;
radeon_bo_reference(&csc->relocs_bo[csc->crelocs], bo);
p_atomic_inc(&bo->num_cs_references);
reloc = &csc->relocs[csc->crelocs];
reloc->handle = bo->handle;
reloc->read_domains = rd;
reloc->write_domain = wd;
reloc->flags = 0;
 
csc->is_handle_added[hash] = TRUE;
if (update_hash) {
csc->reloc_indices_hashlist[hash] = csc->crelocs;
}
 
csc->chunks[1].length_dw += RELOC_DWORDS;
 
*added_domains = rd | wd;
return csc->crelocs++;
}
 
static unsigned radeon_drm_cs_add_reloc(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)buf;
enum radeon_bo_domain added_domains;
unsigned index = radeon_add_reloc(cs, bo, usage, domains, &added_domains);
 
if (added_domains & RADEON_DOMAIN_GTT)
cs->csc->used_gart += bo->base.size;
if (added_domains & RADEON_DOMAIN_VRAM)
cs->csc->used_vram += bo->base.size;
 
return index;
}
 
static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
boolean status =
cs->csc->used_gart < cs->ws->info.gart_size * 0.8 &&
cs->csc->used_vram < cs->ws->info.vram_size * 0.8;
 
if (status) {
cs->csc->validated_crelocs = cs->csc->crelocs;
} else {
/* Remove the recently-added relocations. The validation failed with them
* and the CS is about to be flushed because of that. Keep only
* the already-validated relocations. */
unsigned i;
 
for (i = cs->csc->validated_crelocs; i < cs->csc->crelocs; i++) {
p_atomic_dec(&cs->csc->relocs_bo[i]->num_cs_references);
radeon_bo_reference(&cs->csc->relocs_bo[i], NULL);
}
cs->csc->crelocs = cs->csc->validated_crelocs;
 
/* Flush if there are any relocs. Clean up otherwise. */
if (cs->csc->crelocs) {
cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC);
} else {
radeon_cs_context_cleanup(cs->csc);
 
assert(cs->base.cdw == 0);
if (cs->base.cdw != 0) {
fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
}
}
}
return status;
}
 
static boolean radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
boolean status =
(cs->csc->used_gart + gtt) < cs->ws->info.gart_size * 0.7 &&
(cs->csc->used_vram + vram) < cs->ws->info.vram_size * 0.7;
 
return status;
}
 
static void radeon_drm_cs_write_reloc(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *buf)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)buf;
unsigned index = radeon_get_reloc(cs->csc, bo);
 
if (index == -1) {
fprintf(stderr, "radeon: Cannot get a relocation in %s.\n", __func__);
return;
}
 
OUT_CS(&cs->base, 0xc0001000);
OUT_CS(&cs->base, index * RELOC_DWORDS);
}
 
void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs *cs, struct radeon_cs_context *csc)
{
unsigned i;
 
if (drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
&csc->cs, sizeof(struct drm_radeon_cs))) {
if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
unsigned i;
 
fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
for (i = 0; i < csc->chunks[0].length_dw; i++) {
fprintf(stderr, "0x%08X\n", csc->buf[i]);
}
} else {
fprintf(stderr, "radeon: The kernel rejected CS, "
"see dmesg for more information.\n");
}
}
 
if (cs->trace_buf) {
radeon_dump_cs_on_lockup(cs, csc);
}
 
for (i = 0; i < csc->crelocs; i++)
p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);
 
radeon_cs_context_cleanup(csc);
}
 
/*
* Make sure previous submissions of this CS have completed
*/
void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
/* Wait for any pending ioctl to complete. */
if (cs->ws->thread && cs->flush_started) {
pipe_semaphore_wait(&cs->flush_completed);
cs->flush_started = 0;
}
}
 
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
 
static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs, unsigned flags, uint32_t cs_trace_id)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_cs_context *tmp;
 
switch (cs->base.ring_type) {
case RING_DMA:
/* pad DMA ring to 8 DWs */
if (cs->ws->info.chip_class <= SI) {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
} else {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0x00000000); /* NOP packet */
}
break;
case RING_GFX:
/* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
* r6xx requires at least 4 dw alignment to avoid a hw bug.
*/
if (flags & RADEON_FLUSH_COMPUTE) {
if (cs->ws->info.chip_class <= SI) {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
} else {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
}
} else {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
}
break;
}
 
if (rcs->cdw > RADEON_MAX_CMDBUF_DWORDS) {
fprintf(stderr, "radeon: command stream overflowed\n");
}
 
radeon_drm_cs_sync_flush(rcs);
 
/* Flip command streams. */
tmp = cs->csc;
cs->csc = cs->cst;
cs->cst = tmp;
 
cs->cst->cs_trace_id = cs_trace_id;
 
/* If the CS is not empty and has not overflowed, emit it (possibly in a separate thread). */
if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS && !debug_get_option_noop()) {
unsigned i, crelocs = cs->cst->crelocs;
 
cs->cst->chunks[0].length_dw = cs->base.cdw;
 
for (i = 0; i < crelocs; i++) {
/* Update the number of active asynchronous CS ioctls for the buffer. */
p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
}
 
switch (cs->base.ring_type) {
case RING_DMA:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_DMA;
cs->cst->cs.num_chunks = 3;
if (cs->ws->info.r600_virtual_address) {
cs->cst->flags[0] |= RADEON_CS_USE_VM;
}
break;
 
case RING_UVD:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_UVD;
cs->cst->cs.num_chunks = 3;
break;
 
default:
case RING_GFX:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_GFX;
cs->cst->cs.num_chunks = 2;
if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
cs->cst->cs.num_chunks = 3;
}
if (cs->ws->info.r600_virtual_address) {
cs->cst->flags[0] |= RADEON_CS_USE_VM;
cs->cst->cs.num_chunks = 3;
}
if (flags & RADEON_FLUSH_END_OF_FRAME) {
cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
cs->cst->cs.num_chunks = 3;
}
if (flags & RADEON_FLUSH_COMPUTE) {
cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
cs->cst->cs.num_chunks = 3;
}
break;
}
 
if (cs->ws->thread && (flags & RADEON_FLUSH_ASYNC)) {
cs->flush_started = 1;
radeon_drm_ws_queue_cs(cs->ws, cs);
} else {
pipe_mutex_lock(cs->ws->cs_stack_lock);
if (cs->ws->thread) {
while (p_atomic_read(&cs->ws->ncs)) {
pipe_condvar_wait(cs->ws->cs_queue_empty, cs->ws->cs_stack_lock);
}
}
pipe_mutex_unlock(cs->ws->cs_stack_lock);
radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
}
} else {
radeon_cs_context_cleanup(cs->cst);
}
 
/* Prepare a new CS. */
cs->base.buf = cs->csc->buf;
cs->base.cdw = 0;
}
 
static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
radeon_drm_cs_sync_flush(rcs);
pipe_semaphore_destroy(&cs->flush_completed);
radeon_cs_context_cleanup(&cs->csc1);
radeon_cs_context_cleanup(&cs->csc2);
p_atomic_dec(&cs->ws->num_cs);
radeon_destroy_cs_context(&cs->csc1);
radeon_destroy_cs_context(&cs->csc2);
FREE(cs);
}
 
static void radeon_drm_cs_set_flush(struct radeon_winsys_cs *rcs,
void (*flush)(void *ctx, unsigned flags),
void *user)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
cs->flush_cs = flush;
cs->flush_data = user;
}
 
static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *_buf,
enum radeon_bo_usage usage)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)_buf;
int index;
 
if (!bo->num_cs_references)
return FALSE;
 
index = radeon_get_reloc(cs->csc, bo);
if (index == -1)
return FALSE;
 
if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
return TRUE;
if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
return TRUE;
 
return FALSE;
}
 
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
{
ws->base.cs_create = radeon_drm_cs_create;
ws->base.cs_destroy = radeon_drm_cs_destroy;
ws->base.cs_add_reloc = radeon_drm_cs_add_reloc;
ws->base.cs_validate = radeon_drm_cs_validate;
ws->base.cs_memory_below_limit = radeon_drm_cs_memory_below_limit;
ws->base.cs_write_reloc = radeon_drm_cs_write_reloc;
ws->base.cs_flush = radeon_drm_cs_flush;
ws->base.cs_set_flush_callback = radeon_drm_cs_set_flush;
ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
}
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_drm_cs.h
0,0 → 1,129
/*
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
 
#ifndef RADEON_DRM_CS_H
#define RADEON_DRM_CS_H
 
#include "radeon_drm_bo.h"
#include <radeon_drm.h>
 
struct radeon_cs_context {
uint32_t buf[RADEON_MAX_CMDBUF_DWORDS];
 
int fd;
struct drm_radeon_cs cs;
struct drm_radeon_cs_chunk chunks[3];
uint64_t chunk_array[3];
uint32_t flags[2];
 
uint32_t cs_trace_id;
 
/* Relocs. */
unsigned nrelocs;
unsigned crelocs;
unsigned validated_crelocs;
struct radeon_bo **relocs_bo;
struct drm_radeon_cs_reloc *relocs;
 
/* 0 = BO not added, 1 = BO added */
char is_handle_added[512];
unsigned reloc_indices_hashlist[512];
 
unsigned used_vram;
unsigned used_gart;
};
 
struct radeon_drm_cs {
struct radeon_winsys_cs base;
 
/* We flip between these two CS. While one is being consumed
* by the kernel in another thread, the other one is being filled
* by the pipe driver. */
struct radeon_cs_context csc1;
struct radeon_cs_context csc2;
/* The currently-used CS. */
struct radeon_cs_context *csc;
/* The CS currently owned by the other thread. */
struct radeon_cs_context *cst;
 
/* The winsys. */
struct radeon_drm_winsys *ws;
 
/* Flush CS. */
void (*flush_cs)(void *ctx, unsigned flags);
void *flush_data;
 
int flush_started;
pipe_semaphore flush_completed;
struct radeon_bo *trace_buf;
};
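
/* radeon_drm_cs_flush() swaps csc and cst: the filled context becomes cst and
 * is handed to the submission thread (or submitted in place), while the
 * previous cst, now idle, becomes csc and is reused for the next IB.
 * radeon_drm_cs_sync_flush() waits until cst is safe to touch again. */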
 
int radeon_get_reloc(struct radeon_cs_context *csc, struct radeon_bo *bo);
 
static INLINE struct radeon_drm_cs *
radeon_drm_cs(struct radeon_winsys_cs *base)
{
return (struct radeon_drm_cs*)base;
}
 
static INLINE boolean
radeon_bo_is_referenced_by_cs(struct radeon_drm_cs *cs,
struct radeon_bo *bo)
{
int num_refs = bo->num_cs_references;
return num_refs == bo->rws->num_cs ||
(num_refs && radeon_get_reloc(cs->csc, bo) != -1);
}
 
static INLINE boolean
radeon_bo_is_referenced_by_cs_for_write(struct radeon_drm_cs *cs,
struct radeon_bo *bo)
{
int index;
 
if (!bo->num_cs_references)
return FALSE;
 
index = radeon_get_reloc(cs->csc, bo);
if (index == -1)
return FALSE;
 
return cs->csc->relocs[index].write_domain != 0;
}
 
static INLINE boolean
radeon_bo_is_referenced_by_any_cs(struct radeon_bo *bo)
{
return bo->num_cs_references != 0;
}
 
void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs);
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws);
void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs *cs, struct radeon_cs_context *csc);
 
void radeon_dump_cs_on_lockup(struct radeon_drm_cs *cs, struct radeon_cs_context *csc);
 
#endif
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_drm_cs_dump.c
0,0 → 1,160
/*
* Copyright © 2013 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <jglisse@redhat.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>
#include "radeon_drm_cs.h"
#include "radeon_drm_bo.h"
 
#define RADEON_CS_DUMP_AFTER_MS_TIMEOUT 500
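
/* radeon_dump_cs_on_lockup() below writes a standalone replay program.
 * The generated rlockup_<trace_id>.c roughly has this shape (the arrays are
 * filled with the contents of the hung CS and its buffers):
 *
 *   #include "radeon_ctx.h"
 *   static uint32_t bo_0000_data[...] = { ... };  // one array per mapped BO
 *   static uint32_t bo_relocs[...]    = { ... };  // 4 dwords per relocation
 *   static uint32_t cs[]              = { ... };  // the IB itself
 *   static uint32_t cs_flags[2]       = { ... };
 *
 *   int main(int argc, char *argv[])
 *   {
 *       struct ctx ctx;
 *       ctx_init(&ctx);
 *       bo[i] = bo_new(&ctx, ndw, data_or_NULL, va, alignment);
 *       ctx_cs(&ctx, cs, cs_flags, ARRAY_SIZE(cs), bo, bo_relocs, nbo);
 *       bo_wait(&ctx, bo[0]);
 *   }
 */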
 
void radeon_dump_cs_on_lockup(struct radeon_drm_cs *cs, struct radeon_cs_context *csc)
{
struct drm_radeon_gem_busy args;
FILE *dump;
unsigned i, lockup;
uint32_t *ptr;
char fname[32];
 
/* only dump the first CS that caused a lockup */
if (!csc->crelocs) {
/* cannot determine whether there was a lockup if no BO was used by
* the CS, and in that case most likely no lockup occurred
*/
return;
}
 
memset(&args, 0, sizeof(args));
args.handle = csc->relocs_bo[0]->handle;
for (i = 0; i < RADEON_CS_DUMP_AFTER_MS_TIMEOUT; i++) {
usleep(1);
lockup = drmCommandWriteRead(csc->fd, DRM_RADEON_GEM_BUSY, &args, sizeof(args));
if (!lockup) {
break;
}
}
if (!lockup || i < RADEON_CS_DUMP_AFTER_MS_TIMEOUT) {
return;
}
 
ptr = radeon_bo_do_map(cs->trace_buf);
fprintf(stderr, "timeout on cs lockup likely happen at cs 0x%08x dw 0x%08x\n", ptr[1], ptr[0]);
 
if (csc->cs_trace_id != ptr[1]) {
return;
}
 
/* OK, we are most likely facing a lockup; write the standalone replay file */
snprintf(fname, sizeof(fname), "rlockup_0x%08x.c", csc->cs_trace_id);
dump = fopen(fname, "w");
if (dump == NULL) {
return;
}
fprintf(dump, "/* To build this file you will need to copy radeon_ctx.h\n");
fprintf(dump, " * in same directory. You can find radeon_ctx.h in mesa tree :\n");
fprintf(dump, " * mesa/src/gallium/winsys/radeon/tools/radeon_ctx.h\n");
fprintf(dump, " * Build with :\n");
fprintf(dump, " * gcc -O0 -g %s -ldrm -o rlockup_0x%08x -I/usr/include/libdrm\n", fname, csc->cs_trace_id);
fprintf(dump, " */\n");
fprintf(dump, " /* timeout on cs lockup likely happen at cs 0x%08x dw 0x%08x*/\n", ptr[1], ptr[0]);
fprintf(dump, "#include <stdio.h>\n");
fprintf(dump, "#include <stdint.h>\n");
fprintf(dump, "#include \"radeon_ctx.h\"\n");
fprintf(dump, "\n");
fprintf(dump, "#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))\n");
fprintf(dump, "\n");
 
for (i = 0; i < csc->crelocs; i++) {
unsigned j, ndw = (csc->relocs_bo[i]->base.size + 3) >> 2;
 
ptr = radeon_bo_do_map(csc->relocs_bo[i]);
if (ptr) {
fprintf(dump, "static uint32_t bo_%04d_data[%d] = {\n ", i, ndw);
for (j = 0; j < ndw; j++) {
if (j && !(j % 8)) {
uint32_t offset = (j - 8) << 2;
fprintf(dump, " /* [0x%08x] va[0x%016lx] */\n ", offset, offset + csc->relocs_bo[i]->va);
}
fprintf(dump, " 0x%08x,", ptr[j]);
}
fprintf(dump, "};\n\n");
}
}
 
fprintf(dump, "static uint32_t bo_relocs[%d] = {\n", csc->crelocs * 4);
for (i = 0; i < csc->crelocs; i++) {
fprintf(dump, " 0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
0, csc->relocs[i].read_domains, csc->relocs[i].write_domain, csc->relocs[i].flags);
}
fprintf(dump, "};\n\n");
 
fprintf(dump, "/* cs %d dw */\n", csc->chunks[0].length_dw);
fprintf(dump, "static uint32_t cs[] = {\n");
ptr = csc->buf;
for (i = 0; i < csc->chunks[0].length_dw; i++) {
fprintf(dump, " 0x%08x,\n", ptr[i]);
}
fprintf(dump, "};\n\n");
 
fprintf(dump, "static uint32_t cs_flags[2] = {\n");
fprintf(dump, " 0x%08x,\n", csc->flags[0]);
fprintf(dump, " 0x%08x,\n", csc->flags[1]);
fprintf(dump, "};\n\n");
 
fprintf(dump, "int main(int argc, char *argv[])\n");
fprintf(dump, "{\n");
fprintf(dump, " struct bo *bo[%d];\n", csc->crelocs);
fprintf(dump, " struct ctx ctx;\n");
fprintf(dump, "\n");
fprintf(dump, " ctx_init(&ctx);\n");
fprintf(dump, "\n");
 
for (i = 0; i < csc->crelocs; i++) {
unsigned ndw = (csc->relocs_bo[i]->base.size + 3) >> 2;
uint32_t *ptr;
 
ptr = radeon_bo_do_map(csc->relocs_bo[i]);
if (ptr) {
fprintf(dump, " bo[%d] = bo_new(&ctx, %d, bo_%04d_data, 0x%016lx, 0x%08x);\n",
i, ndw, i, csc->relocs_bo[i]->va, csc->relocs_bo[i]->base.alignment);
} else {
fprintf(dump, " bo[%d] = bo_new(&ctx, %d, NULL, 0x%016lx, 0x%08x);\n",
i, ndw, csc->relocs_bo[i]->va, csc->relocs_bo[i]->base.alignment);
}
}
fprintf(dump, "\n");
fprintf(dump, " ctx_cs(&ctx, cs, cs_flags, ARRAY_SIZE(cs), bo, bo_relocs, %d);\n", csc->crelocs);
fprintf(dump, "\n");
fprintf(dump, " fprintf(stderr, \"waiting for cs execution to end ....\\n\");\n");
fprintf(dump, " bo_wait(&ctx, bo[0]);\n");
fprintf(dump, "}\n");
fclose(dump);
}
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_drm_public.h
0,0 → 1,10
#ifndef RADEON_DRM_PUBLIC_H
#define RADEON_DRM_PUBLIC_H
 
#include "pipe/p_defines.h"
 
struct radeon_winsys;
 
struct radeon_winsys *radeon_drm_winsys_create(int fd);
 
#endif
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
0,0 → 1,669
/*
* Copyright © 2009 Corbin Simpson
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Corbin Simpson <MostAwesomeDude@gmail.com>
* Joakim Sindholt <opensource@zhasha.com>
* Marek Olšák <maraeo@gmail.com>
*/
 
#include "radeon_drm_bo.h"
#include "radeon_drm_cs.h"
#include "radeon_drm_public.h"
 
#include "pipebuffer/pb_bufmgr.h"
#include "util/u_memory.h"
#include "util/u_hash_table.h"
 
#include <xf86drm.h>
#include <stdio.h>
 
/*
* These are copied from radeon_drm.h; once an updated libdrm is released,
* we should bump the configure.ac requirement for it and remove the
* following defines.
*/
#ifndef RADEON_INFO_TILING_CONFIG
#define RADEON_INFO_TILING_CONFIG 6
#endif
 
#ifndef RADEON_INFO_WANT_HYPERZ
#define RADEON_INFO_WANT_HYPERZ 7
#endif
 
#ifndef RADEON_INFO_WANT_CMASK
#define RADEON_INFO_WANT_CMASK 8
#endif
 
#ifndef RADEON_INFO_CLOCK_CRYSTAL_FREQ
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 9
#endif
 
#ifndef RADEON_INFO_NUM_BACKENDS
#define RADEON_INFO_NUM_BACKENDS 0xa
#endif
 
#ifndef RADEON_INFO_NUM_TILE_PIPES
#define RADEON_INFO_NUM_TILE_PIPES 0xb
#endif
 
#ifndef RADEON_INFO_BACKEND_MAP
#define RADEON_INFO_BACKEND_MAP 0xd
#endif
 
#ifndef RADEON_INFO_VA_START
/* virtual address start, va < start are reserved by the kernel */
#define RADEON_INFO_VA_START 0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE 0x0f
#endif
 
#ifndef RADEON_INFO_MAX_PIPES
#define RADEON_INFO_MAX_PIPES 0x10
#endif
 
#ifndef RADEON_INFO_TIMESTAMP
#define RADEON_INFO_TIMESTAMP 0x11
#endif
 
#ifndef RADEON_INFO_RING_WORKING
#define RADEON_INFO_RING_WORKING 0x15
#endif
 
#ifndef RADEON_CS_RING_UVD
#define RADEON_CS_RING_UVD 3
#endif
 
static struct util_hash_table *fd_tab = NULL;
 
/* Enable/disable feature access for one command stream.
* If enable == TRUE, return TRUE on success.
* Otherwise, return FALSE.
*
* We basically do the same thing the kernel does, because we have to deal
* with multiple contexts (here command streams) backed by one winsys. */
static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
struct radeon_drm_cs **owner,
pipe_mutex *mutex,
unsigned request, const char *request_name,
boolean enable)
{
struct drm_radeon_info info;
unsigned value = enable ? 1 : 0;
 
memset(&info, 0, sizeof(info));
 
pipe_mutex_lock(*mutex);
 
/* Early exit if we are sure the request will fail. */
if (enable) {
if (*owner) {
pipe_mutex_unlock(*mutex);
return FALSE;
}
} else {
if (*owner != applier) {
pipe_mutex_unlock(*mutex);
return FALSE;
}
}
 
/* Pass through the request to the kernel. */
info.value = (unsigned long)&value;
info.request = request;
if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
&info, sizeof(info)) != 0) {
pipe_mutex_unlock(*mutex);
return FALSE;
}
 
/* Update the rights in the winsys. */
if (enable) {
if (value) {
*owner = applier;
printf("radeon: Acquired access to %s.\n", request_name);
pipe_mutex_unlock(*mutex);
return TRUE;
}
} else {
*owner = NULL;
printf("radeon: Released access to %s.\n", request_name);
}
 
pipe_mutex_unlock(*mutex);
return FALSE;
}
 
static boolean radeon_get_drm_value(int fd, unsigned request,
const char *errname, uint32_t *out)
{
struct drm_radeon_info info;
int retval;
 
memset(&info, 0, sizeof(info));
 
info.value = (unsigned long)out;
info.request = request;
 
retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
if (retval) {
if (errname) {
fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
errname, retval);
}
return FALSE;
}
return TRUE;
}
 
/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct radeon_drm_winsys *ws)
{
struct drm_radeon_gem_info gem_info;
int retval;
drmVersionPtr version;
 
memset(&gem_info, 0, sizeof(gem_info));
 
/* We do things in a specific order here.
*
* DRM version first. We need to be sure we're running on a KMS chipset.
* This is also for some features.
*
* Then, the PCI ID. This is essential and should return usable numbers
* for all Radeons. If this fails, we probably got handed an FD for some
* non-Radeon card.
*
* The GEM info is actually bogus on the kernel side, as well as our side
* (see radeon_gem_info_ioctl in radeon_gem.c) but that's alright because
* we don't actually use the info for anything yet.
*
* The GB and Z pipe requests should always succeed, but they might not
* return sensible values for all chipsets; that's alright because
* the pipe drivers already know that.
*/
 
/* Get DRM version. */
version = drmGetVersion(ws->fd);
if (version->version_major != 2 ||
version->version_minor < 3) {
fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
"only compatible with 2.3.x (kernel 2.6.34) or later.\n",
__FUNCTION__,
version->version_major,
version->version_minor,
version->version_patchlevel);
drmFreeVersion(version);
return FALSE;
}
 
ws->info.drm_major = version->version_major;
ws->info.drm_minor = version->version_minor;
ws->info.drm_patchlevel = version->version_patchlevel;
drmFreeVersion(version);
 
/* Get PCI ID. */
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
&ws->info.pci_id))
return FALSE;
 
/* Check PCI ID. */
switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R300; break;
#include "pci_ids/r300_pci_ids.h"
#undef CHIPSET
 
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R600; break;
#include "pci_ids/r600_pci_ids.h"
#undef CHIPSET
 
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_SI; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET
 
default:
fprintf(stderr, "radeon: Invalid PCI ID.\n");
return FALSE;
}
 
switch (ws->info.family) {
default:
case CHIP_UNKNOWN:
fprintf(stderr, "radeon: Unknown family.\n");
return FALSE;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV370:
case CHIP_RV380:
case CHIP_RS400:
case CHIP_RC410:
case CHIP_RS480:
ws->info.chip_class = R300;
break;
case CHIP_R420: /* R4xx-based cores. */
case CHIP_R423:
case CHIP_R430:
case CHIP_R480:
case CHIP_R481:
case CHIP_RV410:
case CHIP_RS600:
case CHIP_RS690:
case CHIP_RS740:
ws->info.chip_class = R400;
break;
case CHIP_RV515: /* R5xx-based cores. */
case CHIP_R520:
case CHIP_RV530:
case CHIP_R580:
case CHIP_RV560:
case CHIP_RV570:
ws->info.chip_class = R500;
break;
case CHIP_R600:
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV670:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RS780:
case CHIP_RS880:
ws->info.chip_class = R600;
break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
ws->info.chip_class = R700;
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
ws->info.chip_class = EVERGREEN;
break;
case CHIP_CAYMAN:
case CHIP_ARUBA:
ws->info.chip_class = CAYMAN;
break;
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
ws->info.chip_class = SI;
break;
case CHIP_BONAIRE:
case CHIP_KAVERI:
case CHIP_KABINI:
ws->info.chip_class = CIK;
break;
}
 
/* Check for dma */
ws->info.r600_has_dma = FALSE;
if (ws->info.chip_class >= R700 && ws->info.drm_minor >= 27) {
ws->info.r600_has_dma = TRUE;
}
 
/* Check for UVD */
ws->info.has_uvd = FALSE;
if (ws->info.drm_minor >= 32) {
uint32_t value = RADEON_CS_RING_UVD;
if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
"UVD Ring working", &value))
ws->info.has_uvd = value;
}
 
/* Get GEM info. */
retval = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_INFO,
&gem_info, sizeof(gem_info));
if (retval) {
fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
retval);
return FALSE;
}
ws->info.gart_size = gem_info.gart_size;
ws->info.vram_size = gem_info.vram_size;
 
ws->num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 
/* Generation-specific queries. */
if (ws->gen == DRV_R300) {
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_GB_PIPES,
"GB pipe count",
&ws->info.r300_num_gb_pipes))
return FALSE;
 
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_Z_PIPES,
"Z pipe count",
&ws->info.r300_num_z_pipes))
return FALSE;
}
else if (ws->gen >= DRV_R600) {
if (ws->info.drm_minor >= 9 &&
!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
"num backends",
&ws->info.r600_num_backends))
return FALSE;
 
/* get the GPU counter frequency, failure is not fatal */
radeon_get_drm_value(ws->fd, RADEON_INFO_CLOCK_CRYSTAL_FREQ, NULL,
&ws->info.r600_clock_crystal_freq);
 
radeon_get_drm_value(ws->fd, RADEON_INFO_TILING_CONFIG, NULL,
&ws->info.r600_tiling_config);
 
if (ws->info.drm_minor >= 11) {
radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_TILE_PIPES, NULL,
&ws->info.r600_num_tile_pipes);
 
if (radeon_get_drm_value(ws->fd, RADEON_INFO_BACKEND_MAP, NULL,
&ws->info.r600_backend_map))
ws->info.r600_backend_map_valid = TRUE;
}
 
ws->info.r600_virtual_address = FALSE;
if (ws->info.drm_minor >= 13) {
ws->info.r600_virtual_address = TRUE;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
&ws->info.r600_va_start))
ws->info.r600_virtual_address = FALSE;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
&ws->info.r600_ib_vm_max_size))
ws->info.r600_virtual_address = FALSE;
}
if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", FALSE))
ws->info.r600_virtual_address = FALSE;
}
 
/* Get max pipes; this is only needed for compute shaders. All evergreen+
* chips have at least 2 pipes, so we use 2 as a default. */
ws->info.r600_max_pipes = 2;
radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_PIPES, NULL,
&ws->info.r600_max_pipes);
 
return TRUE;
}
 
static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
 
if (ws->thread) {
ws->kill_thread = 1;
pipe_semaphore_signal(&ws->cs_queued);
pipe_thread_wait(ws->thread);
}
pipe_semaphore_destroy(&ws->cs_queued);
pipe_condvar_destroy(ws->cs_queue_empty);
 
if (!pipe_reference(&ws->base.reference, NULL)) {
return;
}
 
pipe_mutex_destroy(ws->hyperz_owner_mutex);
pipe_mutex_destroy(ws->cmask_owner_mutex);
pipe_mutex_destroy(ws->cs_stack_lock);
 
ws->cman->destroy(ws->cman);
ws->kman->destroy(ws->kman);
if (ws->gen >= DRV_R600) {
radeon_surface_manager_free(ws->surf_man);
}
if (fd_tab) {
util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd));
}
FREE(rws);
}
 
static void radeon_query_info(struct radeon_winsys *rws,
struct radeon_info *info)
{
*info = ((struct radeon_drm_winsys *)rws)->info;
}
 
static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
enum radeon_feature_id fid,
boolean enable)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
switch (fid) {
case RADEON_FID_R300_HYPERZ_ACCESS:
return radeon_set_fd_access(cs, &cs->ws->hyperz_owner,
&cs->ws->hyperz_owner_mutex,
RADEON_INFO_WANT_HYPERZ, "Hyper-Z",
enable);
 
case RADEON_FID_R300_CMASK_ACCESS:
return radeon_set_fd_access(cs, &cs->ws->cmask_owner,
&cs->ws->cmask_owner_mutex,
RADEON_INFO_WANT_CMASK, "AA optimizations",
enable);
}
return FALSE;
}
 
static int radeon_drm_winsys_surface_init(struct radeon_winsys *rws,
struct radeon_surface *surf)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
 
return radeon_surface_init(ws->surf_man, surf);
}
 
static int radeon_drm_winsys_surface_best(struct radeon_winsys *rws,
struct radeon_surface *surf)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
 
return radeon_surface_best(ws->surf_man, surf);
}
 
static uint64_t radeon_query_value(struct radeon_winsys *rws,
enum radeon_value_id value)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
uint64_t ts = 0;
 
switch (value) {
case RADEON_REQUESTED_VRAM_MEMORY:
return ws->allocated_vram;
case RADEON_REQUESTED_GTT_MEMORY:
return ws->allocated_gtt;
case RADEON_BUFFER_WAIT_TIME_NS:
return ws->buffer_wait_time;
case RADEON_TIMESTAMP:
if (ws->info.drm_minor < 20 || ws->gen < DRV_R600) {
assert(0);
return 0;
}
 
radeon_get_drm_value(ws->fd, RADEON_INFO_TIMESTAMP, "timestamp",
(uint32_t*)&ts);
return ts;
}
return 0;
}
 
static unsigned hash_fd(void *key)
{
return pointer_to_intptr(key);
}
 
static int compare_fd(void *key1, void *key2)
{
return pointer_to_intptr(key1) != pointer_to_intptr(key2);
}
 
void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs)
{
retry:
pipe_mutex_lock(ws->cs_stack_lock);
if (p_atomic_read(&ws->ncs) >= RING_LAST) {
/* no room left for a flush */
pipe_mutex_unlock(ws->cs_stack_lock);
goto retry;
}
ws->cs_stack[p_atomic_read(&ws->ncs)] = cs;
p_atomic_inc(&ws->ncs);
pipe_mutex_unlock(ws->cs_stack_lock);
pipe_semaphore_signal(&ws->cs_queued);
}
 
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;
struct radeon_drm_cs *cs;
unsigned i, empty_stack;
 
while (1) {
pipe_semaphore_wait(&ws->cs_queued);
if (ws->kill_thread)
break;
next:
pipe_mutex_lock(ws->cs_stack_lock);
cs = ws->cs_stack[0];
pipe_mutex_unlock(ws->cs_stack_lock);
 
if (cs) {
radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
 
pipe_mutex_lock(ws->cs_stack_lock);
for (i = 1; i < p_atomic_read(&ws->ncs); i++) {
ws->cs_stack[i - 1] = ws->cs_stack[i];
}
ws->cs_stack[p_atomic_read(&ws->ncs) - 1] = NULL;
empty_stack = p_atomic_dec_zero(&ws->ncs);
if (empty_stack) {
pipe_condvar_signal(ws->cs_queue_empty);
}
pipe_mutex_unlock(ws->cs_stack_lock);
 
pipe_semaphore_signal(&cs->flush_completed);
 
if (!empty_stack) {
goto next;
}
}
}
pipe_mutex_lock(ws->cs_stack_lock);
for (i = 0; i < p_atomic_read(&ws->ncs); i++) {
pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
ws->cs_stack[i] = NULL;
}
p_atomic_set(&ws->ncs, 0);
pipe_condvar_signal(ws->cs_queue_empty);
pipe_mutex_unlock(ws->cs_stack_lock);
return NULL;
}
 
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);
 
struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
struct radeon_drm_winsys *ws;
 
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
}
 
ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (ws) {
pipe_reference(NULL, &ws->base.reference);
return &ws->base;
}
 
ws = CALLOC_STRUCT(radeon_drm_winsys);
if (!ws) {
return NULL;
}
ws->fd = fd;
util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);
 
if (!do_winsys_init(ws))
goto fail;
 
/* Create managers. */
ws->kman = radeon_bomgr_create(ws);
if (!ws->kman)
goto fail;
ws->cman = pb_cache_manager_create(ws->kman, 1000000);
if (!ws->cman)
goto fail;
 
if (ws->gen >= DRV_R600) {
ws->surf_man = radeon_surface_manager_new(fd);
if (!ws->surf_man)
goto fail;
}
 
/* init reference */
pipe_reference_init(&ws->base.reference, 1);
 
/* Set functions. */
ws->base.destroy = radeon_winsys_destroy;
ws->base.query_info = radeon_query_info;
ws->base.cs_request_feature = radeon_cs_request_feature;
ws->base.surface_init = radeon_drm_winsys_surface_init;
ws->base.surface_best = radeon_drm_winsys_surface_best;
ws->base.query_value = radeon_query_value;
 
radeon_bomgr_init_functions(ws);
radeon_drm_cs_init_functions(ws);
 
pipe_mutex_init(ws->hyperz_owner_mutex);
pipe_mutex_init(ws->cmask_owner_mutex);
pipe_mutex_init(ws->cs_stack_lock);
 
p_atomic_set(&ws->ncs, 0);
pipe_semaphore_init(&ws->cs_queued, 0);
pipe_condvar_init(ws->cs_queue_empty);
if (ws->num_cpus > 1 && debug_get_option_thread())
ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);
 
return &ws->base;
 
fail:
if (ws->cman)
ws->cman->destroy(ws->cman);
if (ws->kman)
ws->kman->destroy(ws->kman);
if (ws->surf_man)
radeon_surface_manager_free(ws->surf_man);
FREE(ws);
return NULL;
}
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
0,0 → 1,89
/*
* Copyright © 2009 Corbin Simpson
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Corbin Simpson <MostAwesomeDude@gmail.com>
*/
#ifndef RADEON_DRM_WINSYS_H
#define RADEON_DRM_WINSYS_H
 
#include "radeon_winsys.h"
#include "os/os_thread.h"
 
struct radeon_drm_cs;
 
enum radeon_generation {
DRV_R300,
DRV_R600,
DRV_SI
};
 
struct radeon_drm_winsys {
struct radeon_winsys base;
 
int fd; /* DRM file descriptor */
int num_cs; /* The number of command streams created. */
uint64_t allocated_vram;
uint64_t allocated_gtt;
uint64_t buffer_wait_time; /* time spent in buffer_wait in ns */
 
enum radeon_generation gen;
struct radeon_info info;
 
struct pb_manager *kman;
struct pb_manager *cman;
struct radeon_surface_manager *surf_man;
 
uint32_t num_cpus; /* Number of CPUs. */
 
struct radeon_drm_cs *hyperz_owner;
pipe_mutex hyperz_owner_mutex;
struct radeon_drm_cs *cmask_owner;
pipe_mutex cmask_owner_mutex;
 
/* rings submission thread */
pipe_mutex cs_stack_lock;
pipe_semaphore cs_queued;
/* we cannot use a semaphore for the empty queue because maintaining an even
* number of calls to semaphore_wait and semaphore_signal is, to say the
* least, tricky
*/
pipe_condvar cs_queue_empty;
pipe_thread thread;
int kill_thread;
int ncs;
struct radeon_drm_cs *cs_stack[RING_LAST];
};
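
/* Submission-thread handshake (see radeon_drm_winsys.c and radeon_drm_cs.c):
 * radeon_drm_ws_queue_cs() pushes a CS onto cs_stack and signals cs_queued;
 * the thread pops cs_stack[0], submits it with the CS ioctl and then signals
 * the per-CS flush_completed semaphore; cs_queue_empty is signalled once the
 * stack drains so that synchronous flushes can wait for the whole queue. */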
 
static INLINE struct radeon_drm_winsys *
radeon_drm_winsys(struct radeon_winsys *base)
{
return (struct radeon_drm_winsys*)base;
}
 
void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs);
 
#endif
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/drm/radeon_winsys.h
0,0 → 1,504
/*
* Copyright 2008 Corbin Simpson <MostAwesomeDude@gmail.com>
* Copyright 2010 Marek Olšák <maraeo@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE. */
 
#ifndef RADEON_WINSYS_H
#define RADEON_WINSYS_H
 
/* The public winsys interface header for the radeon driver. */
 
/* R300 features in DRM.
*
* 2.6.0:
* - Hyper-Z
* - GB_Z_PEQ_CONFIG on rv350->r4xx
* - R500 FG_ALPHA_VALUE
*
* 2.8.0:
* - R500 US_FORMAT regs
* - R500 ARGB2101010 colorbuffer
* - CMask and AA regs
* - R16F/RG16F
*/
 
#include "pipebuffer/pb_buffer.h"
#include "libdrm/radeon_surface.h"
 
#define RADEON_MAX_CMDBUF_DWORDS (16 * 1024)
 
#define RADEON_FLUSH_ASYNC (1 << 0)
#define RADEON_FLUSH_KEEP_TILING_FLAGS (1 << 1) /* needs DRM 2.12.0 */
#define RADEON_FLUSH_COMPUTE (1 << 2)
#define RADEON_FLUSH_END_OF_FRAME (1 << 3)
 
/* Tiling flags. */
enum radeon_bo_layout {
RADEON_LAYOUT_LINEAR = 0,
RADEON_LAYOUT_TILED,
RADEON_LAYOUT_SQUARETILED,
 
RADEON_LAYOUT_UNKNOWN
};
 
enum radeon_bo_domain { /* bitfield */
RADEON_DOMAIN_GTT = 2,
RADEON_DOMAIN_VRAM = 4
};
 
enum radeon_bo_usage { /* bitfield */
RADEON_USAGE_READ = 2,
RADEON_USAGE_WRITE = 4,
RADEON_USAGE_READWRITE = RADEON_USAGE_READ | RADEON_USAGE_WRITE
};
 
enum radeon_family {
CHIP_UNKNOWN = 0,
CHIP_R300, /* R3xx-based cores. */
CHIP_R350,
CHIP_RV350,
CHIP_RV370,
CHIP_RV380,
CHIP_RS400,
CHIP_RC410,
CHIP_RS480,
CHIP_R420, /* R4xx-based cores. */
CHIP_R423,
CHIP_R430,
CHIP_R480,
CHIP_R481,
CHIP_RV410,
CHIP_RS600,
CHIP_RS690,
CHIP_RS740,
CHIP_RV515, /* R5xx-based cores. */
CHIP_R520,
CHIP_RV530,
CHIP_R580,
CHIP_RV560,
CHIP_RV570,
CHIP_R600,
CHIP_RV610,
CHIP_RV630,
CHIP_RV670,
CHIP_RV620,
CHIP_RV635,
CHIP_RS780,
CHIP_RS880,
CHIP_RV770,
CHIP_RV730,
CHIP_RV710,
CHIP_RV740,
CHIP_CEDAR,
CHIP_REDWOOD,
CHIP_JUNIPER,
CHIP_CYPRESS,
CHIP_HEMLOCK,
CHIP_PALM,
CHIP_SUMO,
CHIP_SUMO2,
CHIP_BARTS,
CHIP_TURKS,
CHIP_CAICOS,
CHIP_CAYMAN,
CHIP_ARUBA,
CHIP_TAHITI,
CHIP_PITCAIRN,
CHIP_VERDE,
CHIP_OLAND,
CHIP_HAINAN,
CHIP_BONAIRE,
CHIP_KAVERI,
CHIP_KABINI,
CHIP_LAST,
};
 
enum chip_class {
CLASS_UNKNOWN = 0,
R300,
R400,
R500,
R600,
R700,
EVERGREEN,
CAYMAN,
SI,
CIK,
};
 
enum ring_type {
RING_GFX = 0,
RING_DMA,
RING_UVD,
RING_LAST,
};
 
enum radeon_value_id {
RADEON_REQUESTED_VRAM_MEMORY,
RADEON_REQUESTED_GTT_MEMORY,
RADEON_BUFFER_WAIT_TIME_NS,
RADEON_TIMESTAMP
};
 
struct winsys_handle;
struct radeon_winsys_cs_handle;
 
struct radeon_winsys_cs {
unsigned cdw; /* Number of used dwords. */
uint32_t *buf; /* The command buffer. */
enum ring_type ring_type;
};
 
struct radeon_info {
uint32_t pci_id;
enum radeon_family family;
enum chip_class chip_class;
uint32_t gart_size;
uint32_t vram_size;
 
uint32_t drm_major; /* version */
uint32_t drm_minor;
uint32_t drm_patchlevel;
 
boolean has_uvd;
 
uint32_t r300_num_gb_pipes;
uint32_t r300_num_z_pipes;
 
uint32_t r600_num_backends;
uint32_t r600_clock_crystal_freq;
uint32_t r600_tiling_config;
uint32_t r600_num_tile_pipes;
uint32_t r600_backend_map;
uint32_t r600_va_start;
uint32_t r600_ib_vm_max_size;
uint32_t r600_max_pipes;
boolean r600_backend_map_valid;
boolean r600_virtual_address;
boolean r600_has_dma;
};
 
enum radeon_feature_id {
RADEON_FID_R300_HYPERZ_ACCESS, /* ZMask + HiZ */
RADEON_FID_R300_CMASK_ACCESS,
};
 
struct radeon_winsys {
/**
* Reference counting
*/
struct pipe_reference reference;
 
/**
* Destroy this winsys.
*
* \param ws The winsys this function is called from.
*/
void (*destroy)(struct radeon_winsys *ws);
 
/**
* Query an info structure from winsys.
*
* \param ws The winsys this function is called from.
* \param info Return structure
*/
void (*query_info)(struct radeon_winsys *ws,
struct radeon_info *info);
 
/**************************************************************************
* Buffer management. Buffer attributes are mostly fixed over a buffer's lifetime.
*
* Remember that gallium gets to choose the interface it needs, and the
* window systems must then implement that interface (rather than the
* other way around...).
*************************************************************************/
 
/**
* Create a buffer object.
*
* \param ws The winsys this function is called from.
* \param size The size to allocate.
* \param alignment An alignment of the buffer in memory.
* \param use_reusable_pool Whether the cache buffer manager should be used.
* \param domain A bitmask of the RADEON_DOMAIN_* flags.
* \return The created buffer object.
*/
struct pb_buffer *(*buffer_create)(struct radeon_winsys *ws,
unsigned size,
unsigned alignment,
boolean use_reusable_pool,
enum radeon_bo_domain domain);
 
struct radeon_winsys_cs_handle *(*buffer_get_cs_handle)(
struct pb_buffer *buf);
 
/**
* Map the entire data store of a buffer object into the client's address
* space.
*
* \param buf A winsys buffer object to map.
* \param cs A command stream to flush if the buffer is referenced by it.
* \param usage A bitmask of the PIPE_TRANSFER_* flags.
* \return The pointer at the beginning of the buffer.
*/
void *(*buffer_map)(struct radeon_winsys_cs_handle *buf,
struct radeon_winsys_cs *cs,
enum pipe_transfer_usage usage);
 
/**
* Unmap a buffer object from the client's address space.
*
* \param buf A winsys buffer object to unmap.
*/
void (*buffer_unmap)(struct radeon_winsys_cs_handle *buf);
 
/**
* Return TRUE if a buffer object is being used by the GPU.
*
* \param buf A winsys buffer object.
* \param usage Only check whether the buffer is busy for the given usage.
*/
boolean (*buffer_is_busy)(struct pb_buffer *buf,
enum radeon_bo_usage usage);
 
/**
* Wait for a buffer object until it is not used by a GPU. This is
* equivalent to a fence placed after the last command using the buffer,
* and synchronizing to the fence.
*
* \param buf A winsys buffer object to wait for.
* \param usage Only wait until the buffer is idle for the given usage,
* but may still be busy for some other usage.
*/
void (*buffer_wait)(struct pb_buffer *buf, enum radeon_bo_usage usage);
 
/**
* Return tiling flags describing a memory layout of a buffer object.
*
* \param buf A winsys buffer object to get the flags from.
* \param microtile A pointer to the return value of the microtile flag.
* \param macrotile A pointer to the return value of the macrotile flag.
*
* \note microtile and macrotile are not bitmasks!
*/
void (*buffer_get_tiling)(struct pb_buffer *buf,
enum radeon_bo_layout *microtile,
enum radeon_bo_layout *macrotile,
unsigned *bankw, unsigned *bankh,
unsigned *tile_split,
unsigned *stencil_tile_split,
unsigned *mtilea);
 
/**
* Set tiling flags describing a memory layout of a buffer object.
*
* \param buf A winsys buffer object to set the flags for.
* \param cs A command stream to flush if the buffer is referenced by it.
* \param macrotile A macrotile flag.
* \param microtile A microtile flag.
* \param stride A stride of the buffer in bytes, for texturing.
*
* \note microtile and macrotile are not bitmasks!
*/
void (*buffer_set_tiling)(struct pb_buffer *buf,
struct radeon_winsys_cs *rcs,
enum radeon_bo_layout microtile,
enum radeon_bo_layout macrotile,
unsigned bankw, unsigned bankh,
unsigned tile_split,
unsigned stencil_tile_split,
unsigned mtilea,
unsigned stride);
 
/**
* Get a winsys buffer from a winsys handle. The internal structure
* of the handle is platform-specific and only a winsys should access it.
*
* \param ws The winsys this function is called from.
* \param whandle A winsys handle pointer as was received from a state
* tracker.
* \param stride The returned buffer stride in bytes.
*/
struct pb_buffer *(*buffer_from_handle)(struct radeon_winsys *ws,
struct winsys_handle *whandle,
unsigned *stride);
 
/**
* Get a winsys handle from a winsys buffer. The internal structure
* of the handle is platform-specific and only a winsys should access it.
*
* \param buf A winsys buffer object to get the handle from.
* \param whandle A winsys handle pointer.
* \param stride A stride of the buffer in bytes, for texturing.
* \return TRUE on success.
*/
boolean (*buffer_get_handle)(struct pb_buffer *buf,
unsigned stride,
struct winsys_handle *whandle);
 
/**
* Return the virtual address of a buffer.
*
* \param buf A winsys buffer object
* \return virtual address
*/
uint64_t (*buffer_get_virtual_address)(struct radeon_winsys_cs_handle *buf);
 
/**************************************************************************
* Command submission.
*
* Each pipe context should create its own command stream and submit
* commands independently of other contexts.
*************************************************************************/
 
/**
* Create a command stream.
*
* \param ws The winsys this function is called from.
* \param ring_type The ring type (GFX, DMA, UVD)
* \param trace_buf Trace buffer when tracing is enabled
*/
struct radeon_winsys_cs *(*cs_create)(struct radeon_winsys *ws,
enum ring_type ring_type,
struct radeon_winsys_cs_handle *trace_buf);
 
/**
* Destroy a command stream.
*
* \param cs A command stream to destroy.
*/
void (*cs_destroy)(struct radeon_winsys_cs *cs);
 
/**
* Add a new buffer relocation. Every relocation must first be added
* before it can be written.
*
* \param cs A command stream to add buffer for validation against.
* \param buf A winsys buffer to validate.
* \param usage Whether the buffer is used for read and/or write.
* \param domain Bitmask of the RADEON_DOMAIN_* flags.
* \return Relocation index.
*/
unsigned (*cs_add_reloc)(struct radeon_winsys_cs *cs,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage,
enum radeon_bo_domain domain);
 
/**
* Return TRUE if there is enough memory in VRAM and GTT for the relocs
* added so far. If the validation fails, all the relocations which have
* been added since the last call of cs_validate will be removed and
* the CS will be flushed (provided there are still any relocations).
*
* \param cs A command stream to validate.
*/
boolean (*cs_validate)(struct radeon_winsys_cs *cs);
 
/**
* Return TRUE if there is enough memory in VRAM and GTT for the relocs
* added so far.
*
* \param cs A command stream to validate.
* \param vram VRAM memory size pending to be used
* \param gtt GTT memory size pending to be used
*/
boolean (*cs_memory_below_limit)(struct radeon_winsys_cs *cs, uint64_t vram, uint64_t gtt);
 
/**
* Write a relocated dword to a command buffer.
*
* \param cs A command stream the relocation is written to.
* \param buf A winsys buffer to write the relocation for.
*/
void (*cs_write_reloc)(struct radeon_winsys_cs *cs,
struct radeon_winsys_cs_handle *buf);
 
/**
* Flush a command stream.
*
* \param cs A command stream to flush.
* \param flags RADEON_FLUSH_ASYNC or 0.
* \param cs_trace_id A unique identifier for the CS
*/
void (*cs_flush)(struct radeon_winsys_cs *cs, unsigned flags, uint32_t cs_trace_id);
 
/**
* Set a flush callback which is called from winsys when flush is
* required.
*
* \param cs A command stream to set the callback for.
* \param flush A flush callback function associated with the command stream.
* \param user A user pointer that will be passed to the flush callback.
*/
void (*cs_set_flush_callback)(struct radeon_winsys_cs *cs,
void (*flush)(void *ctx, unsigned flags),
void *ctx);
 
/**
* Return TRUE if a buffer is referenced by a command stream.
*
* \param cs A command stream.
* \param buf A winsys buffer.
*/
boolean (*cs_is_buffer_referenced)(struct radeon_winsys_cs *cs,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage);
 
/**
* Request access to a feature for a command stream.
*
* \param cs A command stream.
* \param fid Feature ID, one of RADEON_FID_*
* \param enable Whether to enable or disable the feature.
*/
boolean (*cs_request_feature)(struct radeon_winsys_cs *cs,
enum radeon_feature_id fid,
boolean enable);
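 
/* Illustrative sketch (not part of the original header): requesting exclusive
* Hyper-Z access before turning it on; the RADEON_FID_* value is assumed to
* be one of the radeon_feature_id entries declared earlier in this header:
*
* if (ws->cs_request_feature(cs, RADEON_FID_R300_HYPERZ_ACCESS, TRUE))
* ... Hyper-Z can be enabled for this context ...
*/
 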
/**
* Make sure all asynchronous flushes of the cs have completed.
*
* \param cs A command stream.
*/
void (*cs_sync_flush)(struct radeon_winsys_cs *cs);
 
/**
* Initialize a surface.
*
* \param ws The winsys this function is called from.
* \param surf Surface structure pointer
*/
int (*surface_init)(struct radeon_winsys *ws,
struct radeon_surface *surf);
 
/**
* Find best values for a surface
*
* \param ws The winsys this function is called from.
* \param surf Surface structure ptr
*/
int (*surface_best)(struct radeon_winsys *ws,
struct radeon_surface *surf);
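 
/* Illustrative sketch (not part of the original header): one plausible call
* order, filling a radeon_surface and letting the winsys pick the layout.
* The field names follow libdrm's radeon_surface.h and are assumptions here:
*
* struct radeon_surface surf;
* memset(&surf, 0, sizeof(surf));
* surf.npix_x = width;
* surf.npix_y = height;
* surf.npix_z = 1;
* surf.blk_w = surf.blk_h = surf.blk_d = 1;
* surf.array_size = 1;
* surf.last_level = 0;
* surf.bpe = 4;
* surf.nsamples = 1;
* if (!ws->surface_best(ws, &surf))
* ws->surface_init(ws, &surf);
*/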
 
uint64_t (*query_value)(struct radeon_winsys *ws,
enum radeon_value_id value);
};
 
#endif
/contrib/sdk/sources/Mesa/src/gallium/winsys/radeon/tools/radeon_ctx.h
0,0 → 1,235
/*
* Copyright 2011 Jerome Glisse <glisse@freedesktop.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jérôme Glisse
*/
#ifndef RADEON_CTX_H
#define RADEON_CTX_H
 
#define _FILE_OFFSET_BITS 64
#include <sys/mman.h>
 
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "xf86drm.h"
#include "radeon_drm.h"
 
#ifndef RADEON_CHUNK_ID_FLAGS
#define RADEON_CHUNK_ID_FLAGS 0x03
/* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */
#define RADEON_CS_KEEP_TILING_FLAGS 0x01
#endif
 
 
#ifndef RADEON_VA_MAP
 
#define RADEON_VA_MAP 1
#define RADEON_VA_UNMAP 2
#define RADEON_VA_RESULT_OK 0
#define RADEON_VA_RESULT_ERROR 1
#define RADEON_VA_RESULT_VA_EXIST 2
#define RADEON_VM_PAGE_VALID (1 << 0)
#define RADEON_VM_PAGE_READABLE (1 << 1)
#define RADEON_VM_PAGE_WRITEABLE (1 << 2)
#define RADEON_VM_PAGE_SYSTEM (1 << 3)
#define RADEON_VM_PAGE_SNOOPED (1 << 4)
struct drm_radeon_gem_va {
uint32_t handle;
uint32_t operation;
uint32_t vm_id;
uint32_t flags;
uint64_t offset;
};
#define DRM_RADEON_GEM_VA 0x2b
#endif
 
 
struct ctx {
int fd;
};
 
struct bo {
uint32_t handle;
uint32_t alignment;
uint64_t size;
uint64_t va;
void *ptr;
};
 
static void ctx_init(struct ctx *ctx)
{
ctx->fd = drmOpen("radeon", NULL);
if (ctx->fd < 0) {
fprintf(stderr, "failed to open radeon drm device file\n");
exit(-1);
}
}
 
static void bo_wait(struct ctx *ctx, struct bo *bo)
{
struct drm_radeon_gem_wait_idle args;
int r;
 
/* Zero out args to make valgrind happy */
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
do {
r = drmCommandWrite(ctx->fd, DRM_RADEON_GEM_WAIT_IDLE, &args, sizeof(args));
} while (r == -EBUSY);
}
 
 
static void ctx_cs(struct ctx *ctx, uint32_t *cs, uint32_t cs_flags[2], unsigned ndw,
struct bo **bo, uint32_t *bo_relocs, unsigned nbo)
{
struct drm_radeon_cs args;
struct drm_radeon_cs_chunk chunks[3];
uint64_t chunk_array[3];
unsigned i;
int r;
 
/* update handle */
for (i = 0; i < nbo; i++) {
bo_relocs[i*4+0] = bo[i]->handle;
}
 
/* Zero out args to make valgrind happy */
memset(&args, 0, sizeof(args));
args.num_chunks = 2;
if (cs_flags[0] || cs_flags[1]) {
/* enable RADEON_CHUNK_ID_FLAGS */
args.num_chunks = 3;
}
args.chunks = (uint64_t)(uintptr_t)chunk_array;
chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
chunks[0].length_dw = ndw;
chunks[0].chunk_data = (uintptr_t)cs;
chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
chunks[1].length_dw = nbo * 4;
chunks[1].chunk_data = (uintptr_t)bo_relocs;
chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
chunks[2].length_dw = 2;
chunks[2].chunk_data = (uintptr_t)cs_flags;
chunk_array[0] = (uintptr_t)&chunks[0];
chunk_array[1] = (uintptr_t)&chunks[1];
chunk_array[2] = (uintptr_t)&chunks[2];
 
fprintf(stderr, "emiting cs %ddw with %d bo\n", ndw, nbo);
r = drmCommandWriteRead(ctx->fd, DRM_RADEON_CS, &args, sizeof(args));
if (r) {
fprintf(stderr, "cs submission failed with %d\n", r);
return;
}
}
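 
/* Illustrative note (not part of the original file): bo_relocs uses four
* dwords per buffer, matching struct drm_radeon_cs_reloc in radeon_drm.h
* (handle, read_domains, write_domain, flags). ctx_cs() patches the handle
* slot itself, so a caller only pre-fills the domains and flags, e.g.
*
* uint32_t relocs[4];
* relocs[0] = 0; // handle, patched by ctx_cs()
* relocs[1] = RADEON_GEM_DOMAIN_GTT; // read_domains
* relocs[2] = RADEON_GEM_DOMAIN_GTT; // write_domain
* relocs[3] = 0; // flags
*/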
 
static void bo_map(struct ctx *ctx, struct bo *bo)
{
struct drm_radeon_gem_mmap args;
void *ptr;
int r;
 
/* Zero out args to make valgrind happy */
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
args.offset = 0;
args.size = (uint64_t)bo->size;
r = drmCommandWriteRead(ctx->fd, DRM_RADEON_GEM_MMAP, &args, sizeof(args));
if (r) {
fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n", bo, bo->handle, r);
exit(-1);
}
ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, ctx->fd, args.addr_ptr);
if (ptr == MAP_FAILED) {
fprintf(stderr, "%s failed to map bo\n", __func__);
exit(-1);
}
bo->ptr = ptr;
}
 
static void bo_va(struct ctx *ctx, struct bo *bo)
{
struct drm_radeon_gem_va args;
int r;
 
args.handle = bo->handle;
args.vm_id = 0;
args.operation = RADEON_VA_MAP;
args.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE | RADEON_VM_PAGE_SNOOPED;
args.offset = bo->va;
r = drmCommandWriteRead(ctx->fd, DRM_RADEON_GEM_VA, &args, sizeof(args));
if (r && args.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
fprintf(stderr, "radeon: size : %d bytes\n", bo->size);
fprintf(stderr, "radeon: alignment : %d bytes\n", bo->alignment);
fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
exit(-1);
}
}
 
static struct bo *bo_new(struct ctx *ctx, unsigned ndw, uint32_t *data, uint64_t va, uint32_t alignment)
{
struct drm_radeon_gem_create args;
struct bo *bo;
int r;
 
bo = calloc(1, sizeof(*bo));
if (bo == NULL) {
fprintf(stderr, "failed to malloc bo struct\n");
exit(-1);
}
bo->size = ndw * 4ULL;
bo->va = va;
bo->alignment = alignment;
 
args.size = bo->size;
args.alignment = bo->alignment;
args.initial_domain = RADEON_GEM_DOMAIN_GTT;
args.flags = 0;
args.handle = 0;
 
r = drmCommandWriteRead(ctx->fd, DRM_RADEON_GEM_CREATE, &args, sizeof(args));
bo->handle = args.handle;
if (r) {
fprintf(stderr, "Failed to allocate :\n");
fprintf(stderr, " size : %d bytes\n", bo->size);
fprintf(stderr, " alignment : %d bytes\n", bo->alignment);
free(bo);
exit(-1);
}
 
if (data) {
bo_map(ctx, bo);
memcpy(bo->ptr, data, bo->size);
}
 
if (va) {
bo_va(ctx, bo);
}
 
return bo;
}
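 
/* Illustrative end-to-end sketch, not part of the original file and unused by
* the tools here: it submits a small NOP-filled IB that references one GTT
* buffer through a single relocation. The function name, the PM4 type-2 NOP
* value 0x80000000 and the chosen domains are assumptions for this example. */
static void example_submit(void)
{
struct ctx ctx;
struct bo *data_bo;
uint32_t ib[16];
uint32_t data[16];
uint32_t relocs[4];
uint32_t cs_flags[2] = {0, 0};
unsigned i;
 
ctx_init(&ctx);
for (i = 0; i < 16; i++) {
ib[i] = 0x80000000; /* PM4 type-2 NOP */
data[i] = i;
}
/* data buffer referenced by the relocation below */
data_bo = bo_new(&ctx, 16, data, 0, 4096);
relocs[0] = 0; /* handle, patched by ctx_cs() */
relocs[1] = RADEON_GEM_DOMAIN_GTT; /* read_domains */
relocs[2] = RADEON_GEM_DOMAIN_GTT; /* write_domain */
relocs[3] = 0; /* flags */
ctx_cs(&ctx, ib, cs_flags, 16, &data_bo, relocs, 1);
bo_wait(&ctx, data_bo);
}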
 
 
#endif