Subversion Repositories Kolibri OS

Compare Revisions

Rev 5563 → Rev 5564

/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/Makefile.am
0,0 → 1,12
include Makefile.sources
include $(top_srcdir)/src/gallium/Automake.inc
 
AM_CFLAGS = \
$(GALLIUM_WINSYS_CFLAGS) \
$(RADEON_CFLAGS)
 
noinst_LTLIBRARIES = libradeonwinsys.la
 
libradeonwinsys_la_SOURCES = $(C_SOURCES)
 
EXTRA_DIST = $(TOOLS_HDR)
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/Makefile.in
0,0 → 1,857
# Makefile.in generated by automake 1.15 from Makefile.am.
# @configure_input@
 
# Copyright (C) 1994-2014 Free Software Foundation, Inc.
 
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
 
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
 
@SET_MAKE@
 
VPATH = @srcdir@
am__is_gnu_make = { \
if test -z '$(MAKELEVEL)'; then \
false; \
elif test -n '$(MAKE_HOST)'; then \
true; \
elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
true; \
else \
false; \
fi; \
}
am__make_running_with_option = \
case $${target_option-} in \
?) ;; \
*) echo "am__make_running_with_option: internal error: invalid" \
"target option '$${target_option-}' specified" >&2; \
exit 1;; \
esac; \
has_opt=no; \
sane_makeflags=$$MAKEFLAGS; \
if $(am__is_gnu_make); then \
sane_makeflags=$$MFLAGS; \
else \
case $$MAKEFLAGS in \
*\\[\ \ ]*) \
bs=\\; \
sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
| sed "s/$$bs$$bs[$$bs $$bs ]*//g"`;; \
esac; \
fi; \
skip_next=no; \
strip_trailopt () \
{ \
flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
}; \
for flg in $$sane_makeflags; do \
test $$skip_next = yes && { skip_next=no; continue; }; \
case $$flg in \
*=*|--*) continue;; \
-*I) strip_trailopt 'I'; skip_next=yes;; \
-*I?*) strip_trailopt 'I';; \
-*O) strip_trailopt 'O'; skip_next=yes;; \
-*O?*) strip_trailopt 'O';; \
-*l) strip_trailopt 'l'; skip_next=yes;; \
-*l?*) strip_trailopt 'l';; \
-[dEDm]) skip_next=yes;; \
-[JT]) skip_next=yes;; \
esac; \
case $$flg in \
*$$target_option*) has_opt=yes; break;; \
esac; \
done; \
test $$has_opt = yes
am__make_dryrun = (target_option=n; $(am__make_running_with_option))
am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
pkgdatadir = $(datadir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkglibexecdir = $(libexecdir)/@PACKAGE@
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
target_triplet = @target@
@HAVE_DRISW_TRUE@am__append_1 = \
@HAVE_DRISW_TRUE@ $(top_builddir)/src/gallium/winsys/sw/dri/libswdri.la
 
@NEED_WINSYS_XLIB_TRUE@am__append_2 = \
@NEED_WINSYS_XLIB_TRUE@ $(top_builddir)/src/gallium/winsys/sw/xlib/libws_xlib.la \
@NEED_WINSYS_XLIB_TRUE@ -lX11 -lXext -lXfixes \
@NEED_WINSYS_XLIB_TRUE@ $(LIBDRM_LIBS)
 
subdir = src/gallium/winsys/radeon/drm
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
am__aclocal_m4_deps = $(top_srcdir)/m4/ax_check_gnu_make.m4 \
$(top_srcdir)/m4/ax_check_python_mako_module.m4 \
$(top_srcdir)/m4/ax_gcc_builtin.m4 \
$(top_srcdir)/m4/ax_gcc_func_attribute.m4 \
$(top_srcdir)/m4/ax_prog_bison.m4 \
$(top_srcdir)/m4/ax_prog_flex.m4 \
$(top_srcdir)/m4/ax_pthread.m4 $(top_srcdir)/m4/libtool.m4 \
$(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \
$(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \
$(top_srcdir)/VERSION $(top_srcdir)/configure.ac
am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
$(ACLOCAL_M4)
DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
mkinstalldirs = $(install_sh) -d
CONFIG_CLEAN_FILES =
CONFIG_CLEAN_VPATH_FILES =
LTLIBRARIES = $(noinst_LTLIBRARIES)
libradeonwinsys_la_LIBADD =
am__objects_1 = radeon_drm_bo.lo radeon_drm_cs.lo \
radeon_drm_cs_dump.lo radeon_drm_surface.lo \
radeon_drm_winsys.lo
am_libradeonwinsys_la_OBJECTS = $(am__objects_1)
libradeonwinsys_la_OBJECTS = $(am_libradeonwinsys_la_OBJECTS)
AM_V_lt = $(am__v_lt_@AM_V@)
am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
am__v_lt_0 = --silent
am__v_lt_1 =
AM_V_P = $(am__v_P_@AM_V@)
am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
am__v_P_0 = false
am__v_P_1 = :
AM_V_GEN = $(am__v_GEN_@AM_V@)
am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
am__v_GEN_0 = @echo " GEN " $@;
am__v_GEN_1 =
AM_V_at = $(am__v_at_@AM_V@)
am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
am__v_at_0 = @
am__v_at_1 =
DEFAULT_INCLUDES = -I.@am__isrc@
depcomp = $(SHELL) $(top_srcdir)/bin/depcomp
am__depfiles_maybe = depfiles
am__mv = mv -f
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
$(AM_CFLAGS) $(CFLAGS)
AM_V_CC = $(am__v_CC_@AM_V@)
am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
am__v_CC_0 = @echo " CC " $@;
am__v_CC_1 =
CCLD = $(CC)
LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
$(AM_LDFLAGS) $(LDFLAGS) -o $@
AM_V_CCLD = $(am__v_CCLD_@AM_V@)
am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
am__v_CCLD_0 = @echo " CCLD " $@;
am__v_CCLD_1 =
SOURCES = $(libradeonwinsys_la_SOURCES)
DIST_SOURCES = $(libradeonwinsys_la_SOURCES)
am__can_run_installinfo = \
case $$AM_UPDATE_INFO_DIR in \
n|no|NO) false;; \
*) (install-info --version) >/dev/null 2>&1;; \
esac
am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
# Read a list of newline-separated strings from the standard input,
# and print each of them once, without duplicates. Input order is
# *not* preserved.
am__uniquify_input = $(AWK) '\
BEGIN { nonempty = 0; } \
{ items[$$0] = 1; nonempty = 1; } \
END { if (nonempty) { for (i in items) print i; }; } \
'
# Make sure the list of sources is unique. This is necessary because,
# e.g., the same source file might be shared among _SOURCES variables
# for different programs/libraries.
am__define_uniq_tagged_files = \
list='$(am__tagged_files)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | $(am__uniquify_input)`
ETAGS = etags
CTAGS = ctags
am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.sources \
$(top_srcdir)/bin/depcomp \
$(top_srcdir)/src/gallium/Automake.inc
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
AMTAR = @AMTAR@
AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
BSYMBOLIC = @BSYMBOLIC@
CC = @CC@
CCAS = @CCAS@
CCASDEPMODE = @CCASDEPMODE@
CCASFLAGS = @CCASFLAGS@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CLANG_RESOURCE_DIR = @CLANG_RESOURCE_DIR@
CLOCK_LIB = @CLOCK_LIB@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
D3D_DRIVER_INSTALL_DIR = @D3D_DRIVER_INSTALL_DIR@
DEFINES = @DEFINES@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DLLTOOL = @DLLTOOL@
DLOPEN_LIBS = @DLOPEN_LIBS@
DRI2PROTO_CFLAGS = @DRI2PROTO_CFLAGS@
DRI2PROTO_LIBS = @DRI2PROTO_LIBS@
DRI3PROTO_CFLAGS = @DRI3PROTO_CFLAGS@
DRI3PROTO_LIBS = @DRI3PROTO_LIBS@
DRIGL_CFLAGS = @DRIGL_CFLAGS@
DRIGL_LIBS = @DRIGL_LIBS@
DRI_DRIVER_INSTALL_DIR = @DRI_DRIVER_INSTALL_DIR@
DRI_DRIVER_SEARCH_DIR = @DRI_DRIVER_SEARCH_DIR@
DRI_LIB_DEPS = @DRI_LIB_DEPS@
DRI_PC_REQ_PRIV = @DRI_PC_REQ_PRIV@
DSYMUTIL = @DSYMUTIL@
DUMPBIN = @DUMPBIN@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGL_CFLAGS = @EGL_CFLAGS@
EGL_CLIENT_APIS = @EGL_CLIENT_APIS@
EGL_LIB_DEPS = @EGL_LIB_DEPS@
EGL_NATIVE_PLATFORM = @EGL_NATIVE_PLATFORM@
EGREP = @EGREP@
ELF_LIB = @ELF_LIB@
EXEEXT = @EXEEXT@
EXPAT_CFLAGS = @EXPAT_CFLAGS@
EXPAT_LIBS = @EXPAT_LIBS@
FGREP = @FGREP@
FREEDRENO_CFLAGS = @FREEDRENO_CFLAGS@
FREEDRENO_LIBS = @FREEDRENO_LIBS@
GALLIUM_PIPE_LOADER_CLIENT_DEFINES = @GALLIUM_PIPE_LOADER_CLIENT_DEFINES@
GALLIUM_PIPE_LOADER_CLIENT_LIBS = @GALLIUM_PIPE_LOADER_CLIENT_LIBS@
GALLIUM_PIPE_LOADER_DEFINES = @GALLIUM_PIPE_LOADER_DEFINES@
GALLIUM_PIPE_LOADER_LIBS = @GALLIUM_PIPE_LOADER_LIBS@
GALLIUM_PIPE_LOADER_XCB_CFLAGS = @GALLIUM_PIPE_LOADER_XCB_CFLAGS@
GALLIUM_PIPE_LOADER_XCB_LIBS = @GALLIUM_PIPE_LOADER_XCB_LIBS@
GBM_PC_LIB_PRIV = @GBM_PC_LIB_PRIV@
GBM_PC_REQ_PRIV = @GBM_PC_REQ_PRIV@
GC_SECTIONS = @GC_SECTIONS@
GLESv1_CM_LIB_DEPS = @GLESv1_CM_LIB_DEPS@
GLESv1_CM_PC_LIB_PRIV = @GLESv1_CM_PC_LIB_PRIV@
GLESv2_LIB_DEPS = @GLESv2_LIB_DEPS@
GLESv2_PC_LIB_PRIV = @GLESv2_PC_LIB_PRIV@
GLPROTO_CFLAGS = @GLPROTO_CFLAGS@
GLPROTO_LIBS = @GLPROTO_LIBS@
GLX_TLS = @GLX_TLS@
GL_LIB = @GL_LIB@
GL_LIB_DEPS = @GL_LIB_DEPS@
GL_PC_CFLAGS = @GL_PC_CFLAGS@
GL_PC_LIB_PRIV = @GL_PC_LIB_PRIV@
GL_PC_REQ_PRIV = @GL_PC_REQ_PRIV@
GREP = @GREP@
HAVE_XF86VIDMODE = @HAVE_XF86VIDMODE@
INDENT = @INDENT@
INDENT_FLAGS = @INDENT_FLAGS@
INSTALL = @INSTALL@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
INTEL_CFLAGS = @INTEL_CFLAGS@
INTEL_LIBS = @INTEL_LIBS@
LD = @LD@
LDFLAGS = @LDFLAGS@
LD_NO_UNDEFINED = @LD_NO_UNDEFINED@
LEX = @LEX@
LEXLIB = @LEXLIB@
LEX_OUTPUT_ROOT = @LEX_OUTPUT_ROOT@
LIBCLC_INCLUDEDIR = @LIBCLC_INCLUDEDIR@
LIBCLC_LIBEXECDIR = @LIBCLC_LIBEXECDIR@
LIBDRM_CFLAGS = @LIBDRM_CFLAGS@
LIBDRM_LIBS = @LIBDRM_LIBS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBSHA1_CFLAGS = @LIBSHA1_CFLAGS@
LIBSHA1_LIBS = @LIBSHA1_LIBS@
LIBTOOL = @LIBTOOL@
LIBUDEV_CFLAGS = @LIBUDEV_CFLAGS@
LIBUDEV_LIBS = @LIBUDEV_LIBS@
LIB_DIR = @LIB_DIR@
LIB_EXT = @LIB_EXT@
LIPO = @LIPO@
LLVM_BINDIR = @LLVM_BINDIR@
LLVM_CFLAGS = @LLVM_CFLAGS@
LLVM_CONFIG = @LLVM_CONFIG@
LLVM_CPPFLAGS = @LLVM_CPPFLAGS@
LLVM_CXXFLAGS = @LLVM_CXXFLAGS@
LLVM_INCLUDEDIR = @LLVM_INCLUDEDIR@
LLVM_LDFLAGS = @LLVM_LDFLAGS@
LLVM_LIBDIR = @LLVM_LIBDIR@
LLVM_LIBS = @LLVM_LIBS@
LLVM_VERSION = @LLVM_VERSION@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MESA_LLVM = @MESA_LLVM@
MKDIR_P = @MKDIR_P@
MSVC2008_COMPAT_CFLAGS = @MSVC2008_COMPAT_CFLAGS@
MSVC2008_COMPAT_CXXFLAGS = @MSVC2008_COMPAT_CXXFLAGS@
MSVC2013_COMPAT_CFLAGS = @MSVC2013_COMPAT_CFLAGS@
MSVC2013_COMPAT_CXXFLAGS = @MSVC2013_COMPAT_CXXFLAGS@
NINE_MAJOR = @NINE_MAJOR@
NINE_MINOR = @NINE_MINOR@
NINE_TINY = @NINE_TINY@
NINE_VERSION = @NINE_VERSION@
NM = @NM@
NMEDIT = @NMEDIT@
NOUVEAU_CFLAGS = @NOUVEAU_CFLAGS@
NOUVEAU_LIBS = @NOUVEAU_LIBS@
OBJDUMP = @OBJDUMP@
OBJEXT = @OBJEXT@
OMX_CFLAGS = @OMX_CFLAGS@
OMX_LIBS = @OMX_LIBS@
OMX_LIB_INSTALL_DIR = @OMX_LIB_INSTALL_DIR@
OPENCL_LIBNAME = @OPENCL_LIBNAME@
OPENSSL_CFLAGS = @OPENSSL_CFLAGS@
OPENSSL_LIBS = @OPENSSL_LIBS@
OSMESA_LIB = @OSMESA_LIB@
OSMESA_LIB_DEPS = @OSMESA_LIB_DEPS@
OSMESA_PC_LIB_PRIV = @OSMESA_PC_LIB_PRIV@
OSMESA_PC_REQ = @OSMESA_PC_REQ@
OSMESA_VERSION = @OSMESA_VERSION@
OTOOL = @OTOOL@
OTOOL64 = @OTOOL64@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_URL = @PACKAGE_URL@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
PKG_CONFIG = @PKG_CONFIG@
PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
POSIX_SHELL = @POSIX_SHELL@
PRESENTPROTO_CFLAGS = @PRESENTPROTO_CFLAGS@
PRESENTPROTO_LIBS = @PRESENTPROTO_LIBS@
PTHREAD_CC = @PTHREAD_CC@
PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
PTHREAD_LIBS = @PTHREAD_LIBS@
PYTHON2 = @PYTHON2@
RADEON_CFLAGS = @RADEON_CFLAGS@
RADEON_LIBS = @RADEON_LIBS@
RANLIB = @RANLIB@
SED = @SED@
SELINUX_CFLAGS = @SELINUX_CFLAGS@
SELINUX_LIBS = @SELINUX_LIBS@
SET_MAKE = @SET_MAKE@
SHA1_CFLAGS = @SHA1_CFLAGS@
SHA1_LIBS = @SHA1_LIBS@
SHELL = @SHELL@
SSE41_CFLAGS = @SSE41_CFLAGS@
STRIP = @STRIP@
VA_CFLAGS = @VA_CFLAGS@
VA_LIBS = @VA_LIBS@
VA_LIB_INSTALL_DIR = @VA_LIB_INSTALL_DIR@
VA_MAJOR = @VA_MAJOR@
VA_MINOR = @VA_MINOR@
VDPAU_CFLAGS = @VDPAU_CFLAGS@
VDPAU_LIBS = @VDPAU_LIBS@
VDPAU_LIB_INSTALL_DIR = @VDPAU_LIB_INSTALL_DIR@
VDPAU_MAJOR = @VDPAU_MAJOR@
VDPAU_MINOR = @VDPAU_MINOR@
VERSION = @VERSION@
VG_LIB_DEPS = @VG_LIB_DEPS@
VISIBILITY_CFLAGS = @VISIBILITY_CFLAGS@
VISIBILITY_CXXFLAGS = @VISIBILITY_CXXFLAGS@
VL_CFLAGS = @VL_CFLAGS@
VL_LIBS = @VL_LIBS@
WAYLAND_CFLAGS = @WAYLAND_CFLAGS@
WAYLAND_LIBS = @WAYLAND_LIBS@
WAYLAND_SCANNER = @WAYLAND_SCANNER@
WAYLAND_SCANNER_CFLAGS = @WAYLAND_SCANNER_CFLAGS@
WAYLAND_SCANNER_LIBS = @WAYLAND_SCANNER_LIBS@
X11_INCLUDES = @X11_INCLUDES@
XA_MAJOR = @XA_MAJOR@
XA_MINOR = @XA_MINOR@
XA_TINY = @XA_TINY@
XA_VERSION = @XA_VERSION@
XCB_DRI2_CFLAGS = @XCB_DRI2_CFLAGS@
XCB_DRI2_LIBS = @XCB_DRI2_LIBS@
XF86VIDMODE_CFLAGS = @XF86VIDMODE_CFLAGS@
XF86VIDMODE_LIBS = @XF86VIDMODE_LIBS@
XLIBGL_CFLAGS = @XLIBGL_CFLAGS@
XLIBGL_LIBS = @XLIBGL_LIBS@
XVMC_CFLAGS = @XVMC_CFLAGS@
XVMC_LIBS = @XVMC_LIBS@
XVMC_LIB_INSTALL_DIR = @XVMC_LIB_INSTALL_DIR@
XVMC_MAJOR = @XVMC_MAJOR@
XVMC_MINOR = @XVMC_MINOR@
YACC = @YACC@
YFLAGS = @YFLAGS@
abs_builddir = @abs_builddir@
abs_srcdir = @abs_srcdir@
abs_top_builddir = @abs_top_builddir@
abs_top_srcdir = @abs_top_srcdir@
ac_ct_AR = @ac_ct_AR@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
acv_mako_found = @acv_mako_found@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
am__tar = @am__tar@
am__untar = @am__untar@
ax_pthread_config = @ax_pthread_config@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
builddir = @builddir@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
ifGNUmake = @ifGNUmake@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
mkdir_p = @mkdir_p@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
srcdir = @srcdir@
sysconfdir = @sysconfdir@
target = @target@
target_alias = @target_alias@
target_cpu = @target_cpu@
target_os = @target_os@
target_vendor = @target_vendor@
top_build_prefix = @top_build_prefix@
top_builddir = @top_builddir@
top_srcdir = @top_srcdir@
C_SOURCES := \
radeon_drm_bo.c \
radeon_drm_bo.h \
radeon_drm_cs.c \
radeon_drm_cs_dump.c \
radeon_drm_cs.h \
radeon_drm_public.h \
radeon_drm_surface.c \
radeon_drm_winsys.c \
radeon_drm_winsys.h
 
TOOLS_HDR := \
radeon_ctx.h
 
GALLIUM_CFLAGS = \
-I$(top_srcdir)/include \
-I$(top_srcdir)/src \
-I$(top_srcdir)/src/gallium/include \
-I$(top_srcdir)/src/gallium/auxiliary \
$(DEFINES)
 
 
# src/gallium/auxiliary must appear before src/gallium/drivers
# because there are stupidly two rbug_context.h files in
# different directories, and which one is included by the
# preprocessor is determined by the ordering of the -I flags.
GALLIUM_DRIVER_CFLAGS = \
-I$(srcdir)/include \
-I$(top_srcdir)/src \
-I$(top_srcdir)/include \
-I$(top_srcdir)/src/gallium/include \
-I$(top_srcdir)/src/gallium/auxiliary \
-I$(top_srcdir)/src/gallium/drivers \
-I$(top_srcdir)/src/gallium/winsys \
$(DEFINES) \
$(VISIBILITY_CFLAGS)
 
GALLIUM_DRIVER_CXXFLAGS = \
-I$(srcdir)/include \
-I$(top_srcdir)/src \
-I$(top_srcdir)/include \
-I$(top_srcdir)/src/gallium/include \
-I$(top_srcdir)/src/gallium/auxiliary \
-I$(top_srcdir)/src/gallium/drivers \
-I$(top_srcdir)/src/gallium/winsys \
$(DEFINES) \
$(VISIBILITY_CXXFLAGS)
 
GALLIUM_TARGET_CFLAGS = \
-I$(top_srcdir)/src \
-I$(top_srcdir)/include \
-I$(top_srcdir)/src/loader \
-I$(top_srcdir)/src/gallium/include \
-I$(top_srcdir)/src/gallium/auxiliary \
-I$(top_srcdir)/src/gallium/drivers \
-I$(top_srcdir)/src/gallium/winsys \
$(DEFINES) \
$(PTHREAD_CFLAGS) \
$(LIBDRM_CFLAGS) \
$(VISIBILITY_CFLAGS)
 
GALLIUM_COMMON_LIB_DEPS = \
-lm \
$(CLOCK_LIB) \
$(PTHREAD_LIBS) \
$(DLOPEN_LIBS)
 
GALLIUM_WINSYS_CFLAGS = \
-I$(top_srcdir)/src \
-I$(top_srcdir)/include \
-I$(top_srcdir)/src/gallium/include \
-I$(top_srcdir)/src/gallium/auxiliary \
$(DEFINES) \
$(VISIBILITY_CFLAGS)
 
GALLIUM_PIPE_LOADER_WINSYS_LIBS = \
$(top_builddir)/src/gallium/winsys/sw/null/libws_null.la \
$(top_builddir)/src/gallium/winsys/sw/wrapper/libwsw.la \
$(am__append_1) $(am__append_2)
AM_CFLAGS = \
$(GALLIUM_WINSYS_CFLAGS) \
$(RADEON_CFLAGS)
 
noinst_LTLIBRARIES = libradeonwinsys.la
libradeonwinsys_la_SOURCES = $(C_SOURCES)
EXTRA_DIST = $(TOOLS_HDR)
all: all-am
 
.SUFFIXES:
.SUFFIXES: .c .lo .o .obj
$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(srcdir)/Makefile.sources $(top_srcdir)/src/gallium/Automake.inc $(am__configure_deps)
@for dep in $?; do \
case '$(am__configure_deps)' in \
*$$dep*) \
( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
&& { if test -f $@; then exit 0; else break; fi; }; \
exit 1;; \
esac; \
done; \
echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/gallium/winsys/radeon/drm/Makefile'; \
$(am__cd) $(top_srcdir) && \
$(AUTOMAKE) --foreign src/gallium/winsys/radeon/drm/Makefile
Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
@case '$?' in \
*config.status*) \
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
*) \
echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
esac;
$(srcdir)/Makefile.sources $(top_srcdir)/src/gallium/Automake.inc $(am__empty):
 
$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
 
$(top_srcdir)/configure: $(am__configure_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(ACLOCAL_M4): $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
 
clean-noinstLTLIBRARIES:
-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
@list='$(noinst_LTLIBRARIES)'; \
locs=`for p in $$list; do echo $$p; done | \
sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
sort -u`; \
test -z "$$locs" || { \
echo rm -f $${locs}; \
rm -f $${locs}; \
}
 
libradeonwinsys.la: $(libradeonwinsys_la_OBJECTS) $(libradeonwinsys_la_DEPENDENCIES) $(EXTRA_libradeonwinsys_la_DEPENDENCIES)
$(AM_V_CCLD)$(LINK) $(libradeonwinsys_la_OBJECTS) $(libradeonwinsys_la_LIBADD) $(LIBS)
 
mostlyclean-compile:
-rm -f *.$(OBJEXT)
 
distclean-compile:
-rm -f *.tab.c
 
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_bo.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_cs.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_cs_dump.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_surface.Plo@am__quote@
@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/radeon_drm_winsys.Plo@am__quote@
 
.c.o:
@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
 
.c.obj:
@am__fastdepCC_TRUE@ $(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
 
.c.lo:
@am__fastdepCC_TRUE@ $(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
@am__fastdepCC_TRUE@ $(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
@AMDEP_TRUE@@am__fastdepCC_FALSE@ $(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
@AMDEP_TRUE@@am__fastdepCC_FALSE@ DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
@am__fastdepCC_FALSE@ $(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
 
mostlyclean-libtool:
-rm -f *.lo
 
clean-libtool:
-rm -rf .libs _libs
 
ID: $(am__tagged_files)
$(am__define_uniq_tagged_files); mkid -fID $$unique
tags: tags-am
TAGS: tags
 
tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
set x; \
here=`pwd`; \
$(am__define_uniq_tagged_files); \
shift; \
if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
test -n "$$unique" || unique=$$empty_fix; \
if test $$# -gt 0; then \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
"$$@" $$unique; \
else \
$(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$unique; \
fi; \
fi
ctags: ctags-am
 
CTAGS: ctags
ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
$(am__define_uniq_tagged_files); \
test -z "$(CTAGS_ARGS)$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$unique
 
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& $(am__cd) $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) "$$here"
cscopelist: cscopelist-am
 
cscopelist-am: $(am__tagged_files)
list='$(am__tagged_files)'; \
case "$(srcdir)" in \
[\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
*) sdir=$(subdir)/$(srcdir) ;; \
esac; \
for i in $$list; do \
if test -f "$$i"; then \
echo "$(subdir)/$$i"; \
else \
echo "$$sdir/$$i"; \
fi; \
done >> $(top_builddir)/cscope.files
 
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
 
distdir: $(DISTFILES)
@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
list='$(DISTFILES)'; \
dist_files=`for file in $$list; do echo $$file; done | \
sed -e "s|^$$srcdirstrip/||;t" \
-e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
case $$dist_files in \
*/*) $(MKDIR_P) `echo "$$dist_files" | \
sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
sort -u` ;; \
esac; \
for file in $$dist_files; do \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
if test -d $$d/$$file; then \
dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
if test -d "$(distdir)/$$file"; then \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
fi; \
cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
else \
test -f "$(distdir)/$$file" \
|| cp -p $$d/$$file "$(distdir)/$$file" \
|| exit 1; \
fi; \
done
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES)
installdirs:
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
 
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
 
installcheck: installcheck-am
install-strip:
if test -z '$(STRIP)'; then \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
install; \
else \
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
"INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
fi
mostlyclean-generic:
 
clean-generic:
 
distclean-generic:
-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
 
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
 
clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
mostlyclean-am
 
distclean: distclean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic \
distclean-tags
 
dvi: dvi-am
 
dvi-am:
 
html: html-am
 
html-am:
 
info: info-am
 
info-am:
 
install-data-am:
 
install-dvi: install-dvi-am
 
install-dvi-am:
 
install-exec-am:
 
install-html: install-html-am
 
install-html-am:
 
install-info: install-info-am
 
install-info-am:
 
install-man:
 
install-pdf: install-pdf-am
 
install-pdf-am:
 
install-ps: install-ps-am
 
install-ps-am:
 
installcheck-am:
 
maintainer-clean: maintainer-clean-am
-rm -rf ./$(DEPDIR)
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
 
mostlyclean: mostlyclean-am
 
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
 
pdf: pdf-am
 
pdf-am:
 
ps: ps-am
 
ps-am:
 
uninstall-am:
 
.MAKE: install-am install-strip
 
.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
clean-libtool clean-noinstLTLIBRARIES cscopelist-am ctags \
ctags-am distclean distclean-compile distclean-generic \
distclean-libtool distclean-tags distdir dvi dvi-am html \
html-am info info-am install install-am install-data \
install-data-am install-dvi install-dvi-am install-exec \
install-exec-am install-html install-html-am install-info \
install-info-am install-man install-pdf install-pdf-am \
install-ps install-ps-am install-strip installcheck \
installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags tags-am uninstall uninstall-am
 
.PRECIOUS: Makefile
 
 
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/Makefile.sources
0,0 → 1,13
C_SOURCES := \
radeon_drm_bo.c \
radeon_drm_bo.h \
radeon_drm_cs.c \
radeon_drm_cs_dump.c \
radeon_drm_cs.h \
radeon_drm_public.h \
radeon_drm_surface.c \
radeon_drm_winsys.c \
radeon_drm_winsys.h
 
TOOLS_HDR := \
radeon_ctx.h
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_ctx.h
0,0 → 1,205
/*
* Copyright 2011 Jerome Glisse <glisse@freedesktop.org>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jérôme Glisse
*/
#ifndef RADEON_CTX_H
#define RADEON_CTX_H
 
#define _FILE_OFFSET_BITS 64
#include <sys/mman.h>
 
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include "xf86drm.h"
#include "radeon_drm.h"
 
struct ctx {
int fd;
};
 
struct bo {
uint32_t handle;
uint32_t alignment;
uint64_t size;
uint64_t va;
void *ptr;
};
 
static void ctx_init(struct ctx *ctx)
{
ctx->fd = drmOpen("radeon", NULL);
if (ctx->fd < 0) {
fprintf(stderr, "failed to open radeon drm device file\n");
exit(-1);
}
}
 
static void bo_wait(struct ctx *ctx, struct bo *bo)
{
struct drm_radeon_gem_wait_idle args;
void *ptr;
int r;
 
/* Zero out args to make valgrind happy */
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
do {
r = drmCommandWrite(ctx->fd, DRM_RADEON_GEM_WAIT_IDLE, &args, sizeof(args));
} while (r == -EBUSY);
}
 
 
static void ctx_cs(struct ctx *ctx, uint32_t *cs, uint32_t cs_flags[2], unsigned ndw,
struct bo **bo, uint32_t *bo_relocs, unsigned nbo)
{
struct drm_radeon_cs args;
struct drm_radeon_cs_chunk chunks[3];
uint64_t chunk_array[3];
unsigned i;
int r;
 
/* update handle */
for (i = 0; i < nbo; i++) {
bo_relocs[i*4+0] = bo[i]->handle;
}
 
args.num_chunks = 2;
if (cs_flags[0] || cs_flags[1]) {
/* enable RADEON_CHUNK_ID_FLAGS */
args.num_chunks = 3;
}
args.chunks = (uint64_t)(uintptr_t)chunk_array;
chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
chunks[0].length_dw = ndw;
chunks[0].chunk_data = (uintptr_t)cs;
chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
chunks[1].length_dw = nbo * 4;
chunks[1].chunk_data = (uintptr_t)bo_relocs;
chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
chunks[2].length_dw = 2;
chunks[2].chunk_data = (uintptr_t)cs_flags;
chunk_array[0] = (uintptr_t)&chunks[0];
chunk_array[1] = (uintptr_t)&chunks[1];
chunk_array[2] = (uintptr_t)&chunks[2];
 
fprintf(stderr, "emiting cs %ddw with %d bo\n", ndw, nbo);
r = drmCommandWriteRead(ctx->fd, DRM_RADEON_CS, &args, sizeof(args));
if (r) {
fprintf(stderr, "cs submission failed with %d\n", r);
return;
}
}
 
static void bo_map(struct ctx *ctx, struct bo *bo)
{
struct drm_radeon_gem_mmap args;
void *ptr;
int r;
 
/* Zero out args to make valgrind happy */
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
args.offset = 0;
args.size = (uint64_t)bo->size;
r = drmCommandWriteRead(ctx->fd, DRM_RADEON_GEM_MMAP, &args, sizeof(args));
if (r) {
fprintf(stderr, "error mapping %p 0x%08X (error = %d)\n", bo, bo->handle, r);
exit(-1);
}
ptr = mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED, ctx->fd, args.addr_ptr);
if (ptr == MAP_FAILED) {
fprintf(stderr, "%s failed to map bo\n", __func__);
exit(-1);
}
bo->ptr = ptr;
}
 
static void bo_va(struct ctx *ctx, struct bo *bo)
{
struct drm_radeon_gem_va args;
int r;
 
args.handle = bo->handle;
args.vm_id = 0;
args.operation = RADEON_VA_MAP;
args.flags = RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE | RADEON_VM_PAGE_SNOOPED;
args.offset = bo->va;
r = drmCommandWriteRead(ctx->fd, DRM_RADEON_GEM_VA, &args, sizeof(args));
if (r && args.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
fprintf(stderr, "radeon: size : %d bytes\n", bo->size);
fprintf(stderr, "radeon: alignment : %d bytes\n", bo->alignment);
fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
exit(-1);
}
}
 
static struct bo *bo_new(struct ctx *ctx, unsigned ndw, uint32_t *data, uint64_t va, uint32_t alignment)
{
struct drm_radeon_gem_create args;
struct bo *bo;
int r;
 
bo = calloc(1, sizeof(*bo));
if (bo == NULL) {
fprintf(stderr, "failed to malloc bo struct\n");
exit(-1);
}
bo->size = ndw * 4ULL;
bo->va = va;
bo->alignment = alignment;
 
args.size = bo->size;
args.alignment = bo->alignment;
args.initial_domain = RADEON_GEM_DOMAIN_GTT;
args.flags = 0;
args.handle = 0;
 
r = drmCommandWriteRead(ctx->fd, DRM_RADEON_GEM_CREATE, &args, sizeof(args));
bo->handle = args.handle;
if (r) {
fprintf(stderr, "Failed to allocate :\n");
fprintf(stderr, " size : %d bytes\n", bo->size);
fprintf(stderr, " alignment : %d bytes\n", bo->alignment);
free(bo);
exit(-1);
}
 
if (data) {
bo_map(ctx, bo);
memcpy(bo->ptr, data, bo->size);
}
 
if (va) {
bo_va(ctx, bo);
}
 
return bo;
}
 
 
#endif
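
For context, here is a minimal, hypothetical sketch (not part of this revision) of how the debugging helpers in radeon_ctx.h are meant to be combined: open the DRM device, create a GTT buffer from CPU data, submit it as an indirect buffer with one relocation, and wait for idle. The IB contents and relocation flags are placeholders that a real reproduction case would fill in with valid PM4 packets and domains.

#include "radeon_ctx.h"

int main(void)
{
    struct ctx ctx;
    uint32_t ib[16] = {0};           /* placeholder IB: a real case needs valid PM4 packets */
    uint32_t cs_flags[2] = {0, 0};   /* no extra RADEON_CHUNK_ID_FLAGS data */
    uint32_t relocs[4] = {0};        /* one reloc: handle (patched by ctx_cs), rd/wr domains, flags */
    struct bo *bos[1];

    ctx_init(&ctx);                                   /* open the radeon DRM device */
    bos[0] = bo_new(&ctx, 16, ib, 0, 4096);           /* 16-dword GTT buffer, no virtual address */
    ctx_cs(&ctx, ib, cs_flags, 16, bos, relocs, 1);   /* submit the IB with one buffer relocation */
    bo_wait(&ctx, bos[0]);                            /* block until the GPU is done with it */
    return 0;
}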
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_bo.c
0,0 → 1,1135
/*
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
 
#include "radeon_drm_cs.h"
 
#include "util/u_hash_table.h"
#include "util/u_memory.h"
#include "util/simple_list.h"
#include "util/list.h"
#include "os/os_thread.h"
#include "os/os_mman.h"
#include "os/os_time.h"
 
#include "state_tracker/drm_driver.h"
 
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
 
static const struct pb_vtbl radeon_bo_vtbl;
 
static INLINE struct radeon_bo *radeon_bo(struct pb_buffer *bo)
{
assert(bo->vtbl == &radeon_bo_vtbl);
return (struct radeon_bo *)bo;
}
 
struct radeon_bo_va_hole {
struct list_head list;
uint64_t offset;
uint64_t size;
};
 
struct radeon_bomgr {
/* Base class. */
struct pb_manager base;
 
/* Winsys. */
struct radeon_drm_winsys *rws;
 
/* List of buffer GEM names. Protected by bo_handles_mutex. */
struct util_hash_table *bo_names;
/* List of buffer handles. Protected by bo_handles_mutex. */
struct util_hash_table *bo_handles;
/* List of buffer virtual memory ranges. Protected by bo_handles_mutex. */
struct util_hash_table *bo_vas;
pipe_mutex bo_handles_mutex;
pipe_mutex bo_va_mutex;
 
/* is virtual address supported */
bool va;
uint64_t va_offset;
struct list_head va_holes;
};
 
static INLINE struct radeon_bomgr *radeon_bomgr(struct pb_manager *mgr)
{
return (struct radeon_bomgr *)mgr;
}
 
static struct radeon_bo *get_radeon_bo(struct pb_buffer *_buf)
{
struct radeon_bo *bo = NULL;
 
if (_buf->vtbl == &radeon_bo_vtbl) {
bo = radeon_bo(_buf);
} else {
struct pb_buffer *base_buf;
pb_size offset;
pb_get_base_buffer(_buf, &base_buf, &offset);
 
if (base_buf->vtbl == &radeon_bo_vtbl)
bo = radeon_bo(base_buf);
}
 
return bo;
}
 
static void radeon_bo_wait(struct pb_buffer *_buf, enum radeon_bo_usage usage)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
struct drm_radeon_gem_wait_idle args = {0};
 
while (p_atomic_read(&bo->num_active_ioctls)) {
sched_yield();
}
 
args.handle = bo->handle;
while (drmCommandWrite(bo->rws->fd, DRM_RADEON_GEM_WAIT_IDLE,
&args, sizeof(args)) == -EBUSY);
}
 
static boolean radeon_bo_is_busy(struct pb_buffer *_buf,
enum radeon_bo_usage usage)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
struct drm_radeon_gem_busy args = {0};
 
if (p_atomic_read(&bo->num_active_ioctls)) {
return TRUE;
}
 
args.handle = bo->handle;
return drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_BUSY,
&args, sizeof(args)) != 0;
}
 
static enum radeon_bo_domain get_valid_domain(enum radeon_bo_domain domain)
{
/* Zero domains the driver doesn't understand. */
domain &= RADEON_DOMAIN_VRAM_GTT;
 
/* If no domain is set, we must set something... */
if (!domain)
domain = RADEON_DOMAIN_VRAM_GTT;
 
return domain;
}
 
static enum radeon_bo_domain radeon_bo_get_initial_domain(
struct radeon_winsys_cs_handle *buf)
{
struct radeon_bo *bo = (struct radeon_bo*)buf;
struct drm_radeon_gem_op args;
 
if (bo->rws->info.drm_minor < 38)
return RADEON_DOMAIN_VRAM_GTT;
 
memset(&args, 0, sizeof(args));
args.handle = bo->handle;
args.op = RADEON_GEM_OP_GET_INITIAL_DOMAIN;
 
drmCommandWriteRead(bo->rws->fd, DRM_RADEON_GEM_OP,
&args, sizeof(args));
 
/* GEM domains and winsys domains are defined the same. */
return get_valid_domain(args.value);
}
 
static uint64_t radeon_bomgr_find_va(struct radeon_bomgr *mgr, uint64_t size, uint64_t alignment)
{
struct radeon_bo_va_hole *hole, *n;
uint64_t offset = 0, waste = 0;
 
alignment = MAX2(alignment, 4096);
size = align(size, 4096);
 
pipe_mutex_lock(mgr->bo_va_mutex);
/* first look for a hole */
LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
offset = hole->offset;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
offset += waste;
if (offset >= (hole->offset + hole->size)) {
continue;
}
if (!waste && hole->size == size) {
offset = hole->offset;
list_del(&hole->list);
FREE(hole);
pipe_mutex_unlock(mgr->bo_va_mutex);
return offset;
}
if ((hole->size - waste) > size) {
if (waste) {
n = CALLOC_STRUCT(radeon_bo_va_hole);
n->size = waste;
n->offset = hole->offset;
list_add(&n->list, &hole->list);
}
hole->size -= (size + waste);
hole->offset += size + waste;
pipe_mutex_unlock(mgr->bo_va_mutex);
return offset;
}
if ((hole->size - waste) == size) {
hole->size = waste;
pipe_mutex_unlock(mgr->bo_va_mutex);
return offset;
}
}
 
offset = mgr->va_offset;
waste = offset % alignment;
waste = waste ? alignment - waste : 0;
if (waste) {
n = CALLOC_STRUCT(radeon_bo_va_hole);
n->size = waste;
n->offset = offset;
list_add(&n->list, &mgr->va_holes);
}
offset += waste;
mgr->va_offset += size + waste;
pipe_mutex_unlock(mgr->bo_va_mutex);
return offset;
}
 
static void radeon_bomgr_free_va(struct radeon_bomgr *mgr, uint64_t va, uint64_t size)
{
struct radeon_bo_va_hole *hole;
 
size = align(size, 4096);
 
pipe_mutex_lock(mgr->bo_va_mutex);
if ((va + size) == mgr->va_offset) {
mgr->va_offset = va;
/* Delete uppermost hole if it reaches the new top */
if (!LIST_IS_EMPTY(&mgr->va_holes)) {
hole = container_of(mgr->va_holes.next, hole, list);
if ((hole->offset + hole->size) == va) {
mgr->va_offset = hole->offset;
list_del(&hole->list);
FREE(hole);
}
}
} else {
struct radeon_bo_va_hole *next;
 
hole = container_of(&mgr->va_holes, hole, list);
LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
if (next->offset < va)
break;
hole = next;
}
 
if (&hole->list != &mgr->va_holes) {
/* Grow upper hole if it's adjacent */
if (hole->offset == (va + size)) {
hole->offset = va;
hole->size += size;
/* Merge lower hole if it's adjacent */
if (next != hole && &next->list != &mgr->va_holes &&
(next->offset + next->size) == va) {
next->size += hole->size;
list_del(&hole->list);
FREE(hole);
}
goto out;
}
}
 
/* Grow lower hole if it's adjacent */
if (next != hole && &next->list != &mgr->va_holes &&
(next->offset + next->size) == va) {
next->size += size;
goto out;
}
 
/* FIXME on allocation failure we just lose virtual address space
* maybe print a warning
*/
next = CALLOC_STRUCT(radeon_bo_va_hole);
if (next) {
next->size = size;
next->offset = va;
list_add(&next->list, &hole->list);
}
}
out:
pipe_mutex_unlock(mgr->bo_va_mutex);
}
 
static void radeon_bo_destroy(struct pb_buffer *_buf)
{
struct radeon_bo *bo = radeon_bo(_buf);
struct radeon_bomgr *mgr = bo->mgr;
struct drm_gem_close args;
 
memset(&args, 0, sizeof(args));
 
pipe_mutex_lock(bo->mgr->bo_handles_mutex);
util_hash_table_remove(bo->mgr->bo_handles, (void*)(uintptr_t)bo->handle);
if (bo->flink_name) {
util_hash_table_remove(bo->mgr->bo_names,
(void*)(uintptr_t)bo->flink_name);
}
pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
 
if (bo->ptr)
os_munmap(bo->ptr, bo->base.size);
 
/* Close object. */
args.handle = bo->handle;
drmIoctl(bo->rws->fd, DRM_IOCTL_GEM_CLOSE, &args);
 
if (mgr->va) {
radeon_bomgr_free_va(mgr, bo->va, bo->base.size);
}
 
pipe_mutex_destroy(bo->map_mutex);
 
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
bo->rws->allocated_vram -= align(bo->base.size, 4096);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
bo->rws->allocated_gtt -= align(bo->base.size, 4096);
FREE(bo);
}
 
void *radeon_bo_do_map(struct radeon_bo *bo)
{
struct drm_radeon_gem_mmap args = {0};
void *ptr;
 
/* If the buffer is created from user memory, return the user pointer. */
if (bo->user_ptr)
return bo->user_ptr;
 
/* Return the pointer if it's already mapped. */
if (bo->ptr)
return bo->ptr;
 
/* Map the buffer. */
pipe_mutex_lock(bo->map_mutex);
/* Return the pointer if it's already mapped (in case of a race). */
if (bo->ptr) {
pipe_mutex_unlock(bo->map_mutex);
return bo->ptr;
}
args.handle = bo->handle;
args.offset = 0;
args.size = (uint64_t)bo->base.size;
if (drmCommandWriteRead(bo->rws->fd,
DRM_RADEON_GEM_MMAP,
&args,
sizeof(args))) {
pipe_mutex_unlock(bo->map_mutex);
fprintf(stderr, "radeon: gem_mmap failed: %p 0x%08X\n",
bo, bo->handle);
return NULL;
}
 
ptr = os_mmap(0, args.size, PROT_READ|PROT_WRITE, MAP_SHARED,
bo->rws->fd, args.addr_ptr);
if (ptr == MAP_FAILED) {
pipe_mutex_unlock(bo->map_mutex);
fprintf(stderr, "radeon: mmap failed, errno: %i\n", errno);
return NULL;
}
bo->ptr = ptr;
pipe_mutex_unlock(bo->map_mutex);
 
return bo->ptr;
}
 
static void *radeon_bo_map(struct radeon_winsys_cs_handle *buf,
struct radeon_winsys_cs *rcs,
enum pipe_transfer_usage usage)
{
struct radeon_bo *bo = (struct radeon_bo*)buf;
struct radeon_drm_cs *cs = (struct radeon_drm_cs*)rcs;
 
/* If it's not unsynchronized bo_map, flush CS if needed and then wait. */
if (!(usage & PIPE_TRANSFER_UNSYNCHRONIZED)) {
/* DONTBLOCK doesn't make sense with UNSYNCHRONIZED. */
if (usage & PIPE_TRANSFER_DONTBLOCK) {
if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
* if the GPU is using the buffer for read too
* (neither one is changing it).
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
return NULL;
}
 
if (radeon_bo_is_busy((struct pb_buffer*)bo,
RADEON_USAGE_WRITE)) {
return NULL;
}
} else {
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
return NULL;
}
 
if (radeon_bo_is_busy((struct pb_buffer*)bo,
RADEON_USAGE_READWRITE)) {
return NULL;
}
}
} else {
uint64_t time = os_time_get_nano();
 
if (!(usage & PIPE_TRANSFER_WRITE)) {
/* Mapping for read.
*
* Since we are mapping for read, we don't need to wait
* if the GPU is using the buffer for read too
* (neither one is changing it).
*
* Only check whether the buffer is being used for write. */
if (cs && radeon_bo_is_referenced_by_cs_for_write(cs, bo)) {
cs->flush_cs(cs->flush_data, 0, NULL);
}
radeon_bo_wait((struct pb_buffer*)bo,
RADEON_USAGE_WRITE);
} else {
/* Mapping for write. */
if (cs) {
if (radeon_bo_is_referenced_by_cs(cs, bo)) {
cs->flush_cs(cs->flush_data, 0, NULL);
} else {
/* Try to avoid busy-waiting in radeon_bo_wait. */
if (p_atomic_read(&bo->num_active_ioctls))
radeon_drm_cs_sync_flush(rcs);
}
}
 
radeon_bo_wait((struct pb_buffer*)bo, RADEON_USAGE_READWRITE);
}
 
bo->mgr->rws->buffer_wait_time += os_time_get_nano() - time;
}
}
 
return radeon_bo_do_map(bo);
}
 
static void radeon_bo_unmap(struct radeon_winsys_cs_handle *_buf)
{
/* NOP */
}
 
static void radeon_bo_get_base_buffer(struct pb_buffer *buf,
struct pb_buffer **base_buf,
unsigned *offset)
{
*base_buf = buf;
*offset = 0;
}
 
static enum pipe_error radeon_bo_validate(struct pb_buffer *_buf,
struct pb_validate *vl,
unsigned flags)
{
/* Always pinned */
return PIPE_OK;
}
 
static void radeon_bo_fence(struct pb_buffer *buf,
struct pipe_fence_handle *fence)
{
}
 
static const struct pb_vtbl radeon_bo_vtbl = {
radeon_bo_destroy,
NULL, /* never called */
NULL, /* never called */
radeon_bo_validate,
radeon_bo_fence,
radeon_bo_get_base_buffer,
};
 
#ifndef RADEON_GEM_GTT_WC
#define RADEON_GEM_GTT_WC (1 << 2)
#endif
#ifndef RADEON_GEM_CPU_ACCESS
/* BO is expected to be accessed by the CPU */
#define RADEON_GEM_CPU_ACCESS (1 << 3)
#endif
#ifndef RADEON_GEM_NO_CPU_ACCESS
/* CPU access is not expected to work for this BO */
#define RADEON_GEM_NO_CPU_ACCESS (1 << 4)
#endif
 
static struct pb_buffer *radeon_bomgr_create_bo(struct pb_manager *_mgr,
pb_size size,
const struct pb_desc *desc)
{
struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
struct radeon_drm_winsys *rws = mgr->rws;
struct radeon_bo *bo;
struct drm_radeon_gem_create args;
struct radeon_bo_desc *rdesc = (struct radeon_bo_desc*)desc;
int r;
 
memset(&args, 0, sizeof(args));
 
assert(rdesc->initial_domains);
assert((rdesc->initial_domains &
~(RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM)) == 0);
 
args.size = size;
args.alignment = desc->alignment;
args.initial_domain = rdesc->initial_domains;
args.flags = 0;
 
if (rdesc->flags & RADEON_FLAG_GTT_WC)
args.flags |= RADEON_GEM_GTT_WC;
if (rdesc->flags & RADEON_FLAG_CPU_ACCESS)
args.flags |= RADEON_GEM_CPU_ACCESS;
if (rdesc->flags & RADEON_FLAG_NO_CPU_ACCESS)
args.flags |= RADEON_GEM_NO_CPU_ACCESS;
 
if (drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_CREATE,
&args, sizeof(args))) {
fprintf(stderr, "radeon: Failed to allocate a buffer:\n");
fprintf(stderr, "radeon: size : %d bytes\n", size);
fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
fprintf(stderr, "radeon: flags : %d\n", args.flags);
return NULL;
}
 
bo = CALLOC_STRUCT(radeon_bo);
if (!bo)
return NULL;
 
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = desc->alignment;
bo->base.usage = desc->usage;
bo->base.size = size;
bo->base.vtbl = &radeon_bo_vtbl;
bo->mgr = mgr;
bo->rws = mgr->rws;
bo->handle = args.handle;
bo->va = 0;
bo->initial_domain = rdesc->initial_domains;
pipe_mutex_init(bo->map_mutex);
 
if (mgr->va) {
struct drm_radeon_gem_va va;
 
bo->va = radeon_bomgr_find_va(mgr, size, desc->alignment);
 
va.handle = bo->handle;
va.vm_id = 0;
va.operation = RADEON_VA_MAP;
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
RADEON_VM_PAGE_SNOOPED;
va.offset = bo->va;
r = drmCommandWriteRead(rws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
if (r && va.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to allocate virtual address for buffer:\n");
fprintf(stderr, "radeon: size : %d bytes\n", size);
fprintf(stderr, "radeon: alignment : %d bytes\n", desc->alignment);
fprintf(stderr, "radeon: domains : %d\n", args.initial_domain);
fprintf(stderr, "radeon: va : 0x%016llx\n", (unsigned long long)bo->va);
radeon_bo_destroy(&bo->base);
return NULL;
}
pipe_mutex_lock(mgr->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);
 
pipe_mutex_unlock(mgr->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
 
util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
pipe_mutex_unlock(mgr->bo_handles_mutex);
}
 
if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
rws->allocated_vram += align(size, 4096);
else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
rws->allocated_gtt += align(size, 4096);
 
return &bo->base;
}
 
static void radeon_bomgr_flush(struct pb_manager *mgr)
{
/* NOP */
}
 
/* This is for the cache bufmgr. */
static boolean radeon_bomgr_is_buffer_busy(struct pb_manager *_mgr,
struct pb_buffer *_buf)
{
struct radeon_bo *bo = radeon_bo(_buf);
 
if (radeon_bo_is_referenced_by_any_cs(bo)) {
return TRUE;
}
 
if (radeon_bo_is_busy((struct pb_buffer*)bo, RADEON_USAGE_READWRITE)) {
return TRUE;
}
 
return FALSE;
}
 
static void radeon_bomgr_destroy(struct pb_manager *_mgr)
{
struct radeon_bomgr *mgr = radeon_bomgr(_mgr);
util_hash_table_destroy(mgr->bo_names);
util_hash_table_destroy(mgr->bo_handles);
util_hash_table_destroy(mgr->bo_vas);
pipe_mutex_destroy(mgr->bo_handles_mutex);
pipe_mutex_destroy(mgr->bo_va_mutex);
FREE(mgr);
}
 
#define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
 
static unsigned handle_hash(void *key)
{
return PTR_TO_UINT(key);
}
 
static int handle_compare(void *key1, void *key2)
{
return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
}
 
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws)
{
struct radeon_bomgr *mgr;
 
mgr = CALLOC_STRUCT(radeon_bomgr);
if (!mgr)
return NULL;
 
mgr->base.destroy = radeon_bomgr_destroy;
mgr->base.create_buffer = radeon_bomgr_create_bo;
mgr->base.flush = radeon_bomgr_flush;
mgr->base.is_buffer_busy = radeon_bomgr_is_buffer_busy;
 
mgr->rws = rws;
mgr->bo_names = util_hash_table_create(handle_hash, handle_compare);
mgr->bo_handles = util_hash_table_create(handle_hash, handle_compare);
mgr->bo_vas = util_hash_table_create(handle_hash, handle_compare);
pipe_mutex_init(mgr->bo_handles_mutex);
pipe_mutex_init(mgr->bo_va_mutex);
 
mgr->va = rws->info.r600_virtual_address;
mgr->va_offset = rws->va_start;
list_inithead(&mgr->va_holes);
 
return &mgr->base;
}
 
static unsigned eg_tile_split(unsigned tile_split)
{
switch (tile_split) {
case 0: tile_split = 64; break;
case 1: tile_split = 128; break;
case 2: tile_split = 256; break;
case 3: tile_split = 512; break;
default:
case 4: tile_split = 1024; break;
case 5: tile_split = 2048; break;
case 6: tile_split = 4096; break;
}
return tile_split;
}
 
static unsigned eg_tile_split_rev(unsigned eg_tile_split)
{
switch (eg_tile_split) {
case 64: return 0;
case 128: return 1;
case 256: return 2;
case 512: return 3;
default:
case 1024: return 4;
case 2048: return 5;
case 4096: return 6;
}
}
 
static void radeon_bo_get_tiling(struct pb_buffer *_buf,
enum radeon_bo_layout *microtiled,
enum radeon_bo_layout *macrotiled,
unsigned *bankw, unsigned *bankh,
unsigned *tile_split,
unsigned *stencil_tile_split,
unsigned *mtilea,
bool *scanout)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
struct drm_radeon_gem_set_tiling args;
 
memset(&args, 0, sizeof(args));
 
args.handle = bo->handle;
 
drmCommandWriteRead(bo->rws->fd,
DRM_RADEON_GEM_GET_TILING,
&args,
sizeof(args));
 
*microtiled = RADEON_LAYOUT_LINEAR;
*macrotiled = RADEON_LAYOUT_LINEAR;
if (args.tiling_flags & RADEON_TILING_MICRO)
*microtiled = RADEON_LAYOUT_TILED;
else if (args.tiling_flags & RADEON_TILING_MICRO_SQUARE)
*microtiled = RADEON_LAYOUT_SQUARETILED;
 
if (args.tiling_flags & RADEON_TILING_MACRO)
*macrotiled = RADEON_LAYOUT_TILED;
if (bankw && bankh && tile_split && stencil_tile_split && mtilea) {
*bankw = (args.tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
*bankh = (args.tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
*tile_split = (args.tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
*stencil_tile_split = (args.tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
*mtilea = (args.tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
*tile_split = eg_tile_split(*tile_split);
}
if (scanout)
*scanout = bo->rws->gen >= DRV_SI && !(args.tiling_flags & RADEON_TILING_R600_NO_SCANOUT);
}
 
static void radeon_bo_set_tiling(struct pb_buffer *_buf,
struct radeon_winsys_cs *rcs,
enum radeon_bo_layout microtiled,
enum radeon_bo_layout macrotiled,
unsigned bankw, unsigned bankh,
unsigned tile_split,
unsigned stencil_tile_split,
unsigned mtilea,
uint32_t pitch,
bool scanout)
{
struct radeon_bo *bo = get_radeon_bo(_buf);
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct drm_radeon_gem_set_tiling args;
 
memset(&args, 0, sizeof(args));
 
/* Tiling determines how DRM treats the buffer data.
* We must flush CS when changing it if the buffer is referenced. */
if (cs && radeon_bo_is_referenced_by_cs(cs, bo)) {
cs->flush_cs(cs->flush_data, 0, NULL);
}
 
while (p_atomic_read(&bo->num_active_ioctls)) {
sched_yield();
}
 
if (microtiled == RADEON_LAYOUT_TILED)
args.tiling_flags |= RADEON_TILING_MICRO;
else if (microtiled == RADEON_LAYOUT_SQUARETILED)
args.tiling_flags |= RADEON_TILING_MICRO_SQUARE;
 
if (macrotiled == RADEON_LAYOUT_TILED)
args.tiling_flags |= RADEON_TILING_MACRO;
 
args.tiling_flags |= (bankw & RADEON_TILING_EG_BANKW_MASK) <<
RADEON_TILING_EG_BANKW_SHIFT;
args.tiling_flags |= (bankh & RADEON_TILING_EG_BANKH_MASK) <<
RADEON_TILING_EG_BANKH_SHIFT;
if (tile_split) {
args.tiling_flags |= (eg_tile_split_rev(tile_split) &
RADEON_TILING_EG_TILE_SPLIT_MASK) <<
RADEON_TILING_EG_TILE_SPLIT_SHIFT;
}
args.tiling_flags |= (stencil_tile_split &
RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK) <<
RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT;
args.tiling_flags |= (mtilea & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK) <<
RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT;
 
if (bo->rws->gen >= DRV_SI && !scanout)
args.tiling_flags |= RADEON_TILING_R600_NO_SCANOUT;
 
args.handle = bo->handle;
args.pitch = pitch;
 
drmCommandWriteRead(bo->rws->fd,
DRM_RADEON_GEM_SET_TILING,
&args,
sizeof(args));
}
 
static struct radeon_winsys_cs_handle *radeon_drm_get_cs_handle(struct pb_buffer *_buf)
{
/* return radeon_bo. */
return (struct radeon_winsys_cs_handle*)get_radeon_bo(_buf);
}
 
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
unsigned size,
unsigned alignment,
boolean use_reusable_pool,
enum radeon_bo_domain domain,
enum radeon_bo_flag flags)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
struct radeon_bo_desc desc;
struct pb_manager *provider;
struct pb_buffer *buffer;
 
memset(&desc, 0, sizeof(desc));
desc.base.alignment = alignment;
 
/* Only set one usage bit each for domains and flags, or the cache manager
* might consider different sets of domains / flags compatible
*/
if (domain == RADEON_DOMAIN_VRAM_GTT)
desc.base.usage = 1 << 2;
else
desc.base.usage = domain >> 1;
assert(flags < sizeof(desc.base.usage) * 8 - 3);
desc.base.usage |= 1 << (flags + 3);
 
desc.initial_domains = domain;
desc.flags = flags;
 
/* Assign a buffer manager. */
if (use_reusable_pool)
provider = ws->cman;
else
provider = ws->kman;
 
buffer = provider->create_buffer(provider, size, &desc.base);
if (!buffer)
return NULL;
 
pipe_mutex_lock(mgr->bo_handles_mutex);
util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)get_radeon_bo(buffer)->handle, buffer);
pipe_mutex_unlock(mgr->bo_handles_mutex);
 
return (struct pb_buffer*)buffer;
}
 
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
void *pointer, unsigned size)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
struct drm_radeon_gem_userptr args;
struct radeon_bo *bo;
int r;
 
bo = CALLOC_STRUCT(radeon_bo);
if (!bo)
return NULL;
 
memset(&args, 0, sizeof(args));
args.addr = (uintptr_t)pointer;
args.size = align(size, sysconf(_SC_PAGE_SIZE));
args.flags = RADEON_GEM_USERPTR_ANONONLY |
RADEON_GEM_USERPTR_VALIDATE |
RADEON_GEM_USERPTR_REGISTER;
if (drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
&args, sizeof(args))) {
FREE(bo);
return NULL;
}
 
pipe_mutex_lock(mgr->bo_handles_mutex);
 
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
bo->handle = args.handle;
bo->base.alignment = 0;
bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
bo->base.size = size;
bo->base.vtbl = &radeon_bo_vtbl;
bo->mgr = mgr;
bo->rws = mgr->rws;
bo->user_ptr = pointer;
bo->va = 0;
bo->initial_domain = RADEON_DOMAIN_GTT;
pipe_mutex_init(bo->map_mutex);
 
util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);
 
pipe_mutex_unlock(mgr->bo_handles_mutex);
 
if (mgr->va) {
struct drm_radeon_gem_va va;
 
bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);
 
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
va.vm_id = 0;
va.offset = bo->va;
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
RADEON_VM_PAGE_SNOOPED;
va.offset = bo->va;
r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
if (r && va.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to assign virtual address space\n");
radeon_bo_destroy(&bo->base);
return NULL;
}
pipe_mutex_lock(mgr->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);
 
pipe_mutex_unlock(mgr->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
 
util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
pipe_mutex_unlock(mgr->bo_handles_mutex);
}
 
ws->allocated_gtt += align(bo->base.size, 4096);
 
return (struct pb_buffer*)bo;
}
 
static struct pb_buffer *radeon_winsys_bo_from_handle(struct radeon_winsys *rws,
struct winsys_handle *whandle,
unsigned *stride)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_bo *bo;
struct radeon_bomgr *mgr = radeon_bomgr(ws->kman);
int r;
unsigned handle;
uint64_t size = 0;
 
/* We must maintain a list of pairs <handle, bo>, so that we always return
* the same BO for one particular handle. If we didn't do that and created
* more than one BO for the same handle and then relocated them in a CS,
* we would hit a deadlock in the kernel.
*
* The list of pairs is guarded by a mutex, of course. */
pipe_mutex_lock(mgr->bo_handles_mutex);
 
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
/* First check if there already is an existing bo for the handle. */
bo = util_hash_table_get(mgr->bo_names, (void*)(uintptr_t)whandle->handle);
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
/* We must first get the GEM handle, as fds are unreliable keys */
r = drmPrimeFDToHandle(ws->fd, whandle->handle, &handle);
if (r)
goto fail;
bo = util_hash_table_get(mgr->bo_handles, (void*)(uintptr_t)handle);
} else {
/* Unknown handle type */
goto fail;
}
 
if (bo) {
/* Increase the refcount. */
struct pb_buffer *b = NULL;
pb_reference(&b, &bo->base);
goto done;
}
 
/* There isn't, create a new one. */
bo = CALLOC_STRUCT(radeon_bo);
if (!bo) {
goto fail;
}
 
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
struct drm_gem_open open_arg = {};
memset(&open_arg, 0, sizeof(open_arg));
/* Open the BO. */
open_arg.name = whandle->handle;
if (drmIoctl(ws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
FREE(bo);
goto fail;
}
handle = open_arg.handle;
size = open_arg.size;
bo->flink_name = whandle->handle;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
size = lseek(whandle->handle, 0, SEEK_END);
/*
* Could check errno to determine whether the kernel is new enough, but
* it doesn't really matter why this failed, just that it failed.
*/
if (size == (off_t)-1) {
FREE(bo);
goto fail;
}
lseek(whandle->handle, 0, SEEK_SET);
}
 
bo->handle = handle;
 
/* Initialize it. */
pipe_reference_init(&bo->base.reference, 1);
bo->base.alignment = 0;
bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
bo->base.size = (unsigned) size;
bo->base.vtbl = &radeon_bo_vtbl;
bo->mgr = mgr;
bo->rws = mgr->rws;
bo->va = 0;
pipe_mutex_init(bo->map_mutex);
 
if (bo->flink_name)
util_hash_table_set(mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
 
util_hash_table_set(mgr->bo_handles, (void*)(uintptr_t)bo->handle, bo);
 
done:
pipe_mutex_unlock(mgr->bo_handles_mutex);
 
if (stride)
*stride = whandle->stride;
 
if (mgr->va && !bo->va) {
struct drm_radeon_gem_va va;
 
bo->va = radeon_bomgr_find_va(mgr, bo->base.size, 1 << 20);
 
va.handle = bo->handle;
va.operation = RADEON_VA_MAP;
va.vm_id = 0;
va.offset = bo->va;
va.flags = RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_WRITEABLE |
RADEON_VM_PAGE_SNOOPED;
va.offset = bo->va;
r = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_VA, &va, sizeof(va));
if (r && va.operation == RADEON_VA_RESULT_ERROR) {
fprintf(stderr, "radeon: Failed to assign virtual address space\n");
radeon_bo_destroy(&bo->base);
return NULL;
}
pipe_mutex_lock(mgr->bo_handles_mutex);
if (va.operation == RADEON_VA_RESULT_VA_EXIST) {
struct pb_buffer *b = &bo->base;
struct radeon_bo *old_bo =
util_hash_table_get(mgr->bo_vas, (void*)(uintptr_t)va.offset);
 
pipe_mutex_unlock(mgr->bo_handles_mutex);
pb_reference(&b, &old_bo->base);
return b;
}
 
util_hash_table_set(mgr->bo_vas, (void*)(uintptr_t)bo->va, bo);
pipe_mutex_unlock(mgr->bo_handles_mutex);
}
 
bo->initial_domain = radeon_bo_get_initial_domain((void*)bo);
 
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
ws->allocated_vram += align(bo->base.size, 4096);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
ws->allocated_gtt += align(bo->base.size, 4096);
 
return (struct pb_buffer*)bo;
 
fail:
pipe_mutex_unlock(mgr->bo_handles_mutex);
return NULL;
}
 
static boolean radeon_winsys_bo_get_handle(struct pb_buffer *buffer,
unsigned stride,
struct winsys_handle *whandle)
{
struct drm_gem_flink flink;
struct radeon_bo *bo = get_radeon_bo(buffer);
 
memset(&flink, 0, sizeof(flink));
 
if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
if (!bo->flink_name) {
flink.handle = bo->handle;
 
if (ioctl(bo->rws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
return FALSE;
}
 
bo->flink_name = flink.name;
 
pipe_mutex_lock(bo->mgr->bo_handles_mutex);
util_hash_table_set(bo->mgr->bo_names, (void*)(uintptr_t)bo->flink_name, bo);
pipe_mutex_unlock(bo->mgr->bo_handles_mutex);
}
whandle->handle = bo->flink_name;
} else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
whandle->handle = bo->handle;
} else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
if (drmPrimeHandleToFD(bo->rws->fd, bo->handle, DRM_CLOEXEC, (int*)&whandle->handle))
return FALSE;
}
 
whandle->stride = stride;
return TRUE;
}
 
static uint64_t radeon_winsys_bo_va(struct radeon_winsys_cs_handle *buf)
{
return ((struct radeon_bo*)buf)->va;
}
 
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws)
{
ws->base.buffer_get_cs_handle = radeon_drm_get_cs_handle;
ws->base.buffer_set_tiling = radeon_bo_set_tiling;
ws->base.buffer_get_tiling = radeon_bo_get_tiling;
ws->base.buffer_map = radeon_bo_map;
ws->base.buffer_unmap = radeon_bo_unmap;
ws->base.buffer_wait = radeon_bo_wait;
ws->base.buffer_is_busy = radeon_bo_is_busy;
ws->base.buffer_create = radeon_winsys_bo_create;
ws->base.buffer_from_handle = radeon_winsys_bo_from_handle;
ws->base.buffer_from_ptr = radeon_winsys_bo_from_ptr;
ws->base.buffer_get_handle = radeon_winsys_bo_get_handle;
ws->base.buffer_get_virtual_address = radeon_winsys_bo_va;
ws->base.buffer_get_initial_domain = radeon_bo_get_initial_domain;
}
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_bo.h
0,0 → 1,82
/*
* Copyright © 2008 Jérôme Glisse
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <glisse@freedesktop.org>
* Marek Olšák <maraeo@gmail.com>
*/
#ifndef RADEON_DRM_BO_H
#define RADEON_DRM_BO_H
 
#include "radeon_drm_winsys.h"
#include "pipebuffer/pb_bufmgr.h"
#include "os/os_thread.h"
 
struct radeon_bomgr;
 
struct radeon_bo_desc {
struct pb_desc base;
 
unsigned initial_domains;
unsigned flags;
};
 
struct radeon_bo {
struct pb_buffer base;
 
struct radeon_bomgr *mgr;
struct radeon_drm_winsys *rws;
void *user_ptr; /* from buffer_from_ptr */
 
void *ptr;
pipe_mutex map_mutex;
 
uint32_t handle;
uint32_t flink_name;
uint64_t va;
enum radeon_bo_domain initial_domain;
 
/* how many command streams is this bo referenced in? */
int num_cs_references;
 
/* how many command streams, which are being emitted in a separate
* thread, is this bo referenced in? */
int num_active_ioctls;
};
 
struct pb_manager *radeon_bomgr_create(struct radeon_drm_winsys *rws);
void radeon_bomgr_init_functions(struct radeon_drm_winsys *ws);
 
static INLINE
void radeon_bo_reference(struct radeon_bo **dst, struct radeon_bo *src)
{
pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}
 
void *radeon_bo_do_map(struct radeon_bo *bo);
 
#endif
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_cs.c
0,0 → 1,669
/*
* Copyright © 2008 Jérôme Glisse
* Copyright © 2010 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*
* Based on work from libdrm_radeon by:
* Aapo Tahkola <aet@rasterburn.org>
* Nicolai Haehnle <prefect_@gmx.net>
* Jérôme Glisse <glisse@freedesktop.org>
*/
 
/*
This file replaces libdrm's radeon_cs_gem with our own implementation.
It's optimized specifically for Radeon DRM.
Reloc writes and space checking are faster and simpler than their
counterparts in libdrm (the time complexity of all the functions
is O(1) in nearly all scenarios, thanks to hashing).
 
It works like this:
 
cs_add_reloc(cs, buf, read_domain, write_domain) adds a new relocation and
also adds the size of 'buf' to the used_gart and used_vram winsys variables
based on the domains, which are simply or'd for accounting purposes.
The add is skipped if the reloc is already present in the list, but it
still accounts for any newly-referenced domains.
 
cs_validate is then called, which just checks:
used_vram/gart < vram/gart_size * 0.8
The 0.8 number allows for some memory fragmentation. If the validation
fails, the pipe driver flushes the CS and tries the validation again,
i.e. it validates only that one operation. If it fails again, it drops
the operation on the floor and prints some nasty message to stderr.
(done in the pipe driver)
 
cs_write_reloc(cs, buf) just writes a reloc that has been added using
cs_add_reloc. The read_domain and write_domain parameters have been removed,
because we already specify them in cs_add_reloc.
*/
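/* For illustration only, a typical sequence on the pipe driver side might
look roughly like this (using the entry points installed by
radeon_drm_cs_init_functions() below; the exact driver-side wrappers vary):

unsigned reloc = ws->cs_add_reloc(cs, handle, RADEON_USAGE_READ,
RADEON_DOMAIN_VRAM, RADEON_PRIO_MIN);
if (!ws->cs_validate(cs)) {
... flush the CS and validate just this one operation again ...
}
... emit packets that reference 'reloc' ...
ws->cs_flush(cs, RADEON_FLUSH_ASYNC, NULL, 0);
*/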
 
#include "radeon_drm_cs.h"
 
#include "util/u_memory.h"
#include "os/os_time.h"
 
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <xf86drm.h>
 
 
#define RELOC_DWORDS (sizeof(struct drm_radeon_cs_reloc) / sizeof(uint32_t))
 
static struct pipe_fence_handle *
radeon_cs_create_fence(struct radeon_winsys_cs *rcs);
static void radeon_fence_reference(struct pipe_fence_handle **dst,
struct pipe_fence_handle *src);
 
static boolean radeon_init_cs_context(struct radeon_cs_context *csc,
struct radeon_drm_winsys *ws)
{
int i;
 
csc->fd = ws->fd;
csc->nrelocs = 512;
csc->relocs_bo = (struct radeon_bo**)
CALLOC(1, csc->nrelocs * sizeof(struct radeon_bo*));
if (!csc->relocs_bo) {
return FALSE;
}
 
csc->relocs = (struct drm_radeon_cs_reloc*)
CALLOC(1, csc->nrelocs * sizeof(struct drm_radeon_cs_reloc));
if (!csc->relocs) {
FREE(csc->relocs_bo);
return FALSE;
}
 
csc->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
csc->chunks[0].length_dw = 0;
csc->chunks[0].chunk_data = (uint64_t)(uintptr_t)csc->buf;
csc->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
csc->chunks[1].length_dw = 0;
csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
csc->chunks[2].chunk_id = RADEON_CHUNK_ID_FLAGS;
csc->chunks[2].length_dw = 2;
csc->chunks[2].chunk_data = (uint64_t)(uintptr_t)&csc->flags;
 
csc->chunk_array[0] = (uint64_t)(uintptr_t)&csc->chunks[0];
csc->chunk_array[1] = (uint64_t)(uintptr_t)&csc->chunks[1];
csc->chunk_array[2] = (uint64_t)(uintptr_t)&csc->chunks[2];
 
csc->cs.chunks = (uint64_t)(uintptr_t)csc->chunk_array;
 
for (i = 0; i < Elements(csc->reloc_indices_hashlist); i++) {
csc->reloc_indices_hashlist[i] = -1;
}
return TRUE;
}
 
static void radeon_cs_context_cleanup(struct radeon_cs_context *csc)
{
unsigned i;
 
for (i = 0; i < csc->crelocs; i++) {
p_atomic_dec(&csc->relocs_bo[i]->num_cs_references);
radeon_bo_reference(&csc->relocs_bo[i], NULL);
}
 
csc->crelocs = 0;
csc->validated_crelocs = 0;
csc->chunks[0].length_dw = 0;
csc->chunks[1].length_dw = 0;
csc->used_gart = 0;
csc->used_vram = 0;
 
for (i = 0; i < Elements(csc->reloc_indices_hashlist); i++) {
csc->reloc_indices_hashlist[i] = -1;
}
}
 
static void radeon_destroy_cs_context(struct radeon_cs_context *csc)
{
radeon_cs_context_cleanup(csc);
FREE(csc->relocs_bo);
FREE(csc->relocs);
}
 
 
static struct radeon_winsys_cs *
radeon_drm_cs_create(struct radeon_winsys *rws,
enum ring_type ring_type,
void (*flush)(void *ctx, unsigned flags,
struct pipe_fence_handle **fence),
void *flush_ctx,
struct radeon_winsys_cs_handle *trace_buf)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct radeon_drm_cs *cs;
 
cs = CALLOC_STRUCT(radeon_drm_cs);
if (!cs) {
return NULL;
}
pipe_semaphore_init(&cs->flush_completed, 1);
 
cs->ws = ws;
cs->flush_cs = flush;
cs->flush_data = flush_ctx;
cs->trace_buf = (struct radeon_bo*)trace_buf;
 
if (!radeon_init_cs_context(&cs->csc1, cs->ws)) {
FREE(cs);
return NULL;
}
if (!radeon_init_cs_context(&cs->csc2, cs->ws)) {
radeon_destroy_cs_context(&cs->csc1);
FREE(cs);
return NULL;
}
 
/* Set the first command buffer as current. */
cs->csc = &cs->csc1;
cs->cst = &cs->csc2;
cs->base.buf = cs->csc->buf;
cs->base.ring_type = ring_type;
 
p_atomic_inc(&ws->num_cs);
return &cs->base;
}
 
#define OUT_CS(cs, value) (cs)->buf[(cs)->cdw++] = (value)
 
static INLINE void update_reloc(struct drm_radeon_cs_reloc *reloc,
enum radeon_bo_domain rd,
enum radeon_bo_domain wd,
unsigned priority,
enum radeon_bo_domain *added_domains)
{
*added_domains = (rd | wd) & ~(reloc->read_domains | reloc->write_domain);
 
reloc->read_domains |= rd;
reloc->write_domain |= wd;
reloc->flags = MAX2(reloc->flags, priority);
}
 
int radeon_get_reloc(struct radeon_cs_context *csc, struct radeon_bo *bo)
{
unsigned hash = bo->handle & (Elements(csc->reloc_indices_hashlist)-1);
int i = csc->reloc_indices_hashlist[hash];
 
/* Fast path: the hash slot is empty (bo not in the list) or already points at this bo. */
if (i == -1 || csc->relocs_bo[i] == bo)
return i;
 
/* Hash collision, look for the BO in the list of relocs linearly. */
for (i = csc->crelocs - 1; i >= 0; i--) {
if (csc->relocs_bo[i] == bo) {
/* Put this reloc in the hash list.
* This will prevent additional hash collisions if there are
* several consecutive get_reloc calls for the same buffer.
*
* Example: Assuming buffers A,B,C collide in the hash list,
* the following sequence of relocs:
* AAAAAAAAAAABBBBBBBBBBBBBBCCCCCCCC
* will collide here: ^ and here: ^,
* meaning that we should get very few collisions in the end. */
csc->reloc_indices_hashlist[hash] = i;
return i;
}
}
return -1;
}
 
static unsigned radeon_add_reloc(struct radeon_drm_cs *cs,
struct radeon_bo *bo,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
unsigned priority,
enum radeon_bo_domain *added_domains)
{
struct radeon_cs_context *csc = cs->csc;
struct drm_radeon_cs_reloc *reloc;
unsigned hash = bo->handle & (Elements(csc->reloc_indices_hashlist)-1);
enum radeon_bo_domain rd = usage & RADEON_USAGE_READ ? domains : 0;
enum radeon_bo_domain wd = usage & RADEON_USAGE_WRITE ? domains : 0;
int i = -1;
 
priority = MIN2(priority, 15);
*added_domains = 0;
 
i = radeon_get_reloc(csc, bo);
 
if (i >= 0) {
reloc = &csc->relocs[i];
update_reloc(reloc, rd, wd, priority, added_domains);
 
/* For async DMA, every add_reloc call must add a buffer to the list
* no matter how many duplicates there are. This is due to the fact that
* the DMA CS checker doesn't use NOP packets for offset patching,
* but always uses the i-th buffer from the list to patch the i-th
* offset. If there are N offsets in a DMA CS, there must also be N
* buffers in the relocation list.
*
* This doesn't have to be done if virtual memory is enabled,
* because there is no offset patching with virtual memory.
*/
if (cs->base.ring_type != RING_DMA || cs->ws->info.r600_virtual_address) {
return i;
}
}
 
/* New relocation, check if the backing array is large enough. */
if (csc->crelocs >= csc->nrelocs) {
uint32_t size;
csc->nrelocs += 10;
 
size = csc->nrelocs * sizeof(struct radeon_bo*);
csc->relocs_bo = realloc(csc->relocs_bo, size);
 
size = csc->nrelocs * sizeof(struct drm_radeon_cs_reloc);
csc->relocs = realloc(csc->relocs, size);
 
csc->chunks[1].chunk_data = (uint64_t)(uintptr_t)csc->relocs;
}
 
/* Initialize the new relocation. */
csc->relocs_bo[csc->crelocs] = NULL;
radeon_bo_reference(&csc->relocs_bo[csc->crelocs], bo);
p_atomic_inc(&bo->num_cs_references);
reloc = &csc->relocs[csc->crelocs];
reloc->handle = bo->handle;
reloc->read_domains = rd;
reloc->write_domain = wd;
reloc->flags = priority;
 
csc->reloc_indices_hashlist[hash] = csc->crelocs;
 
csc->chunks[1].length_dw += RELOC_DWORDS;
 
*added_domains = rd | wd;
return csc->crelocs++;
}
 
static unsigned radeon_drm_cs_add_reloc(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *buf,
enum radeon_bo_usage usage,
enum radeon_bo_domain domains,
enum radeon_bo_priority priority)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)buf;
enum radeon_bo_domain added_domains;
unsigned index = radeon_add_reloc(cs, bo, usage, domains, priority, &added_domains);
 
if (added_domains & RADEON_DOMAIN_GTT)
cs->csc->used_gart += bo->base.size;
if (added_domains & RADEON_DOMAIN_VRAM)
cs->csc->used_vram += bo->base.size;
 
return index;
}
 
static int radeon_drm_cs_get_reloc(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *buf)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
return radeon_get_reloc(cs->csc, (struct radeon_bo*)buf);
}
 
static boolean radeon_drm_cs_validate(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
boolean status =
cs->csc->used_gart < cs->ws->info.gart_size * 0.8 &&
cs->csc->used_vram < cs->ws->info.vram_size * 0.8;
 
if (status) {
cs->csc->validated_crelocs = cs->csc->crelocs;
} else {
/* Remove lately-added relocations. The validation failed with them
* and the CS is about to be flushed because of that. Keep only
* the already-validated relocations. */
unsigned i;
 
for (i = cs->csc->validated_crelocs; i < cs->csc->crelocs; i++) {
p_atomic_dec(&cs->csc->relocs_bo[i]->num_cs_references);
radeon_bo_reference(&cs->csc->relocs_bo[i], NULL);
}
cs->csc->crelocs = cs->csc->validated_crelocs;
 
/* Flush if there are any relocs. Clean up otherwise. */
if (cs->csc->crelocs) {
cs->flush_cs(cs->flush_data, RADEON_FLUSH_ASYNC, NULL);
} else {
radeon_cs_context_cleanup(cs->csc);
 
assert(cs->base.cdw == 0);
if (cs->base.cdw != 0) {
fprintf(stderr, "radeon: Unexpected error in %s.\n", __func__);
}
}
}
return status;
}
 
static boolean radeon_drm_cs_memory_below_limit(struct radeon_winsys_cs *rcs, uint64_t vram, uint64_t gtt)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
boolean status =
(cs->csc->used_gart + gtt) < cs->ws->info.gart_size * 0.7 &&
(cs->csc->used_vram + vram) < cs->ws->info.vram_size * 0.7;
 
return status;
}
 
void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs *cs, struct radeon_cs_context *csc)
{
unsigned i;
 
if (drmCommandWriteRead(csc->fd, DRM_RADEON_CS,
&csc->cs, sizeof(struct drm_radeon_cs))) {
if (debug_get_bool_option("RADEON_DUMP_CS", FALSE)) {
unsigned i;
 
fprintf(stderr, "radeon: The kernel rejected CS, dumping...\n");
for (i = 0; i < csc->chunks[0].length_dw; i++) {
fprintf(stderr, "0x%08X\n", csc->buf[i]);
}
} else {
fprintf(stderr, "radeon: The kernel rejected CS, "
"see dmesg for more information.\n");
}
}
 
if (cs->trace_buf) {
radeon_dump_cs_on_lockup(cs, csc);
}
 
for (i = 0; i < csc->crelocs; i++)
p_atomic_dec(&csc->relocs_bo[i]->num_active_ioctls);
 
radeon_cs_context_cleanup(csc);
}
 
/*
* Make sure any previous submission of this cs has completed
*/
void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
/* Wait for any pending ioctl to complete. */
if (cs->ws->thread) {
pipe_semaphore_wait(&cs->flush_completed);
pipe_semaphore_signal(&cs->flush_completed);
}
}
 
DEBUG_GET_ONCE_BOOL_OPTION(noop, "RADEON_NOOP", FALSE)
 
static void radeon_drm_cs_flush(struct radeon_winsys_cs *rcs,
unsigned flags,
struct pipe_fence_handle **fence,
uint32_t cs_trace_id)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_cs_context *tmp;
 
switch (cs->base.ring_type) {
case RING_DMA:
/* pad DMA ring to 8 DWs */
if (cs->ws->info.chip_class <= SI) {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0xf0000000); /* NOP packet */
} else {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0x00000000); /* NOP packet */
}
break;
case RING_GFX:
/* pad GFX ring to 8 DWs to meet CP fetch alignment requirements;
* r6xx requires at least 4 dw alignment to avoid a hw bug.
* hawaii with old firmware needs type2 nop packet.
* accel_working2 with value 3 indicates the new firmware.
*/
if (cs->ws->info.chip_class <= SI ||
(cs->ws->info.family == CHIP_HAWAII &&
cs->ws->accel_working2 < 3)) {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
} else {
while (rcs->cdw & 7)
OUT_CS(&cs->base, 0xffff1000); /* type3 nop packet */
}
break;
case RING_UVD:
while (rcs->cdw & 15)
OUT_CS(&cs->base, 0x80000000); /* type2 nop packet */
break;
default:
break;
}
 
if (rcs->cdw > RADEON_MAX_CMDBUF_DWORDS) {
fprintf(stderr, "radeon: command stream overflowed\n");
}
 
if (fence) {
radeon_fence_reference(fence, NULL);
*fence = radeon_cs_create_fence(rcs);
}
 
radeon_drm_cs_sync_flush(rcs);
 
/* Swap command streams. */
tmp = cs->csc;
cs->csc = cs->cst;
cs->cst = tmp;
 
cs->cst->cs_trace_id = cs_trace_id;
 
/* If the CS is not empty and not overflowed, emit it in a separate thread. */
if (cs->base.cdw && cs->base.cdw <= RADEON_MAX_CMDBUF_DWORDS && !debug_get_option_noop()) {
unsigned i, crelocs;
 
crelocs = cs->cst->crelocs;
 
cs->cst->chunks[0].length_dw = cs->base.cdw;
 
for (i = 0; i < crelocs; i++) {
/* Update the number of active asynchronous CS ioctls for the buffer. */
p_atomic_inc(&cs->cst->relocs_bo[i]->num_active_ioctls);
}
 
switch (cs->base.ring_type) {
case RING_DMA:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_DMA;
cs->cst->cs.num_chunks = 3;
if (cs->ws->info.r600_virtual_address) {
cs->cst->flags[0] |= RADEON_CS_USE_VM;
}
break;
 
case RING_UVD:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_UVD;
cs->cst->cs.num_chunks = 3;
break;
 
case RING_VCE:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_VCE;
cs->cst->cs.num_chunks = 3;
break;
 
default:
case RING_GFX:
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_GFX;
cs->cst->cs.num_chunks = 2;
if (flags & RADEON_FLUSH_KEEP_TILING_FLAGS) {
cs->cst->flags[0] |= RADEON_CS_KEEP_TILING_FLAGS;
cs->cst->cs.num_chunks = 3;
}
if (cs->ws->info.r600_virtual_address) {
cs->cst->flags[0] |= RADEON_CS_USE_VM;
cs->cst->cs.num_chunks = 3;
}
if (flags & RADEON_FLUSH_END_OF_FRAME) {
cs->cst->flags[0] |= RADEON_CS_END_OF_FRAME;
cs->cst->cs.num_chunks = 3;
}
if (flags & RADEON_FLUSH_COMPUTE) {
cs->cst->flags[1] = RADEON_CS_RING_COMPUTE;
cs->cst->cs.num_chunks = 3;
}
break;
}
 
if (cs->ws->thread) {
pipe_semaphore_wait(&cs->flush_completed);
radeon_drm_ws_queue_cs(cs->ws, cs);
if (!(flags & RADEON_FLUSH_ASYNC))
radeon_drm_cs_sync_flush(rcs);
} else {
radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
}
} else {
radeon_cs_context_cleanup(cs->cst);
}
 
/* Prepare a new CS. */
cs->base.buf = cs->csc->buf;
cs->base.cdw = 0;
 
cs->ws->num_cs_flushes++;
}
 
static void radeon_drm_cs_destroy(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
radeon_drm_cs_sync_flush(rcs);
pipe_semaphore_destroy(&cs->flush_completed);
radeon_cs_context_cleanup(&cs->csc1);
radeon_cs_context_cleanup(&cs->csc2);
p_atomic_dec(&cs->ws->num_cs);
radeon_destroy_cs_context(&cs->csc1);
radeon_destroy_cs_context(&cs->csc2);
FREE(cs);
}
 
static boolean radeon_bo_is_referenced(struct radeon_winsys_cs *rcs,
struct radeon_winsys_cs_handle *_buf,
enum radeon_bo_usage usage)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct radeon_bo *bo = (struct radeon_bo*)_buf;
int index;
 
if (!bo->num_cs_references)
return FALSE;
 
index = radeon_get_reloc(cs->csc, bo);
if (index == -1)
return FALSE;
 
if ((usage & RADEON_USAGE_WRITE) && cs->csc->relocs[index].write_domain)
return TRUE;
if ((usage & RADEON_USAGE_READ) && cs->csc->relocs[index].read_domains)
return TRUE;
 
return FALSE;
}
 
/* FENCES */
 
static struct pipe_fence_handle *
radeon_cs_create_fence(struct radeon_winsys_cs *rcs)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
struct pb_buffer *fence;
 
/* Create a fence, which is a dummy BO. */
fence = cs->ws->base.buffer_create(&cs->ws->base, 1, 1, TRUE,
RADEON_DOMAIN_GTT, 0);
/* Add the fence as a dummy relocation. */
cs->ws->base.cs_add_reloc(rcs, cs->ws->base.buffer_get_cs_handle(fence),
RADEON_USAGE_READWRITE, RADEON_DOMAIN_GTT,
RADEON_PRIO_MIN);
return (struct pipe_fence_handle*)fence;
}
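/* Note: the fence created above is just a 1-byte GTT buffer that rides along
* as a relocation, so waiting on it (radeon_fence_wait below) reduces to
* waiting until that dummy BO is idle. A hypothetical caller:
*
* struct pipe_fence_handle *fence = NULL;
* ws->cs_flush(cs, 0, &fence, 0);
* ws->fence_wait(ws, fence, PIPE_TIMEOUT_INFINITE);
* ws->fence_reference(&fence, NULL);
*/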
 
static bool radeon_fence_wait(struct radeon_winsys *ws,
struct pipe_fence_handle *fence,
uint64_t timeout)
{
struct pb_buffer *rfence = (struct pb_buffer*)fence;
 
if (timeout == 0)
return !ws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE);
 
if (timeout != PIPE_TIMEOUT_INFINITE) {
int64_t start_time = os_time_get();
 
/* Convert to microseconds. */
timeout /= 1000;
 
/* Wait in a loop. */
while (ws->buffer_is_busy(rfence, RADEON_USAGE_READWRITE)) {
if (os_time_get() - start_time >= timeout) {
return FALSE;
}
os_time_sleep(10);
}
return TRUE;
}
 
ws->buffer_wait(rfence, RADEON_USAGE_READWRITE);
return TRUE;
}
 
static void radeon_fence_reference(struct pipe_fence_handle **dst,
struct pipe_fence_handle *src)
{
pb_reference((struct pb_buffer**)dst, (struct pb_buffer*)src);
}
 
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws)
{
ws->base.cs_create = radeon_drm_cs_create;
ws->base.cs_destroy = radeon_drm_cs_destroy;
ws->base.cs_add_reloc = radeon_drm_cs_add_reloc;
ws->base.cs_get_reloc = radeon_drm_cs_get_reloc;
ws->base.cs_validate = radeon_drm_cs_validate;
ws->base.cs_memory_below_limit = radeon_drm_cs_memory_below_limit;
ws->base.cs_flush = radeon_drm_cs_flush;
ws->base.cs_is_buffer_referenced = radeon_bo_is_referenced;
ws->base.cs_sync_flush = radeon_drm_cs_sync_flush;
ws->base.fence_wait = radeon_fence_wait;
ws->base.fence_reference = radeon_fence_reference;
}
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_cs.h
0,0 → 1,125
/*
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
 
#ifndef RADEON_DRM_CS_H
#define RADEON_DRM_CS_H
 
#include "radeon_drm_bo.h"
 
struct radeon_cs_context {
uint32_t buf[RADEON_MAX_CMDBUF_DWORDS];
 
int fd;
struct drm_radeon_cs cs;
struct drm_radeon_cs_chunk chunks[3];
uint64_t chunk_array[3];
uint32_t flags[2];
 
uint32_t cs_trace_id;
 
/* Relocs. */
unsigned nrelocs;
unsigned crelocs;
unsigned validated_crelocs;
struct radeon_bo **relocs_bo;
struct drm_radeon_cs_reloc *relocs;
 
int reloc_indices_hashlist[512];
 
uint64_t used_vram;
uint64_t used_gart;
};
 
struct radeon_drm_cs {
struct radeon_winsys_cs base;
 
/* We flip between these two CS. While one is being consumed
* by the kernel in another thread, the other one is being filled
* by the pipe driver. */
struct radeon_cs_context csc1;
struct radeon_cs_context csc2;
/* The currently-used CS. */
struct radeon_cs_context *csc;
/* The CS currently owned by the other thread. */
struct radeon_cs_context *cst;
 
/* The winsys. */
struct radeon_drm_winsys *ws;
 
/* Flush CS. */
void (*flush_cs)(void *ctx, unsigned flags, struct pipe_fence_handle **fence);
void *flush_data;
 
pipe_semaphore flush_completed;
struct radeon_bo *trace_buf;
};
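/* Illustration of the double buffering above: radeon_drm_cs_flush() simply
* swaps the two pointers,
*
* tmp = cs->csc; cs->csc = cs->cst; cs->cst = tmp;
*
* so the pipe driver keeps filling 'csc' while the submission thread is still
* consuming 'cst', and neither side touches the other's context. */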
 
int radeon_get_reloc(struct radeon_cs_context *csc, struct radeon_bo *bo);
 
static INLINE struct radeon_drm_cs *
radeon_drm_cs(struct radeon_winsys_cs *base)
{
return (struct radeon_drm_cs*)base;
}
 
static INLINE boolean
radeon_bo_is_referenced_by_cs(struct radeon_drm_cs *cs,
struct radeon_bo *bo)
{
int num_refs = bo->num_cs_references;
return num_refs == bo->rws->num_cs ||
(num_refs && radeon_get_reloc(cs->csc, bo) != -1);
}
 
static INLINE boolean
radeon_bo_is_referenced_by_cs_for_write(struct radeon_drm_cs *cs,
struct radeon_bo *bo)
{
int index;
 
if (!bo->num_cs_references)
return FALSE;
 
index = radeon_get_reloc(cs->csc, bo);
if (index == -1)
return FALSE;
 
return cs->csc->relocs[index].write_domain != 0;
}
 
static INLINE boolean
radeon_bo_is_referenced_by_any_cs(struct radeon_bo *bo)
{
return bo->num_cs_references != 0;
}
 
void radeon_drm_cs_sync_flush(struct radeon_winsys_cs *rcs);
void radeon_drm_cs_init_functions(struct radeon_drm_winsys *ws);
void radeon_drm_cs_emit_ioctl_oneshot(struct radeon_drm_cs *cs, struct radeon_cs_context *csc);
 
void radeon_dump_cs_on_lockup(struct radeon_drm_cs *cs, struct radeon_cs_context *csc);
 
#endif
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_cs_dump.c
0,0 → 1,161
/*
* Copyright © 2013 Jérôme Glisse
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Jérôme Glisse <jglisse@redhat.com>
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <xf86drm.h>
#include "radeon_drm_cs.h"
#include "radeon_drm_bo.h"
 
#define RADEON_CS_DUMP_AFTER_MS_TIMEOUT 500
 
void radeon_dump_cs_on_lockup(struct radeon_drm_cs *cs, struct radeon_cs_context *csc)
{
struct drm_radeon_gem_busy args;
FILE *dump;
unsigned i, lockup;
uint32_t *ptr;
char fname[32];
 
/* only dump the first cs that causes a lockup */
if (!csc->crelocs) {
/* cannot determine whether there was a lockup if no bo was used by
* the cs, and in that case a lockup most likely did not occur
*/
return;
}
 
memset(&args, 0, sizeof(args));
args.handle = csc->relocs_bo[0]->handle;
for (i = 0; i < RADEON_CS_DUMP_AFTER_MS_TIMEOUT; i++) {
usleep(1);
lockup = drmCommandWriteRead(csc->fd, DRM_RADEON_GEM_BUSY, &args, sizeof(args));
if (!lockup) {
break;
}
}
if (!lockup || i < RADEON_CS_DUMP_AFTER_MS_TIMEOUT) {
return;
}
 
ptr = radeon_bo_do_map(cs->trace_buf);
fprintf(stderr, "timeout on cs lockup likely happen at cs 0x%08x dw 0x%08x\n", ptr[1], ptr[0]);
 
if (csc->cs_trace_id != ptr[1]) {
return;
}
 
/* ok, we are most likely facing a lockup; write the standalone replay file */
snprintf(fname, sizeof(fname), "rlockup_0x%08x.c", csc->cs_trace_id);
dump = fopen(fname, "w");
if (dump == NULL) {
return;
}
fprintf(dump, "/* To build this file you will need to copy radeon_ctx.h\n");
fprintf(dump, " * in same directory. You can find radeon_ctx.h in mesa tree :\n");
fprintf(dump, " * mesa/src/gallium/winsys/radeon/drm/radeon_ctx.h\n");
fprintf(dump, " * Build with :\n");
fprintf(dump, " * gcc -O0 -g `pkg-config --cflags --libs libdrm` %s -o rlockup_0x%08x \n", fname, csc->cs_trace_id);
fprintf(dump, " */\n");
fprintf(dump, " /* timeout on cs lockup likely happen at cs 0x%08x dw 0x%08x*/\n", ptr[1], ptr[0]);
fprintf(dump, "#include <stdio.h>\n");
fprintf(dump, "#include <stdint.h>\n");
fprintf(dump, "#include \"radeon_ctx.h\"\n");
fprintf(dump, "\n");
fprintf(dump, "#define ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0]))\n");
fprintf(dump, "\n");
 
for (i = 0; i < csc->crelocs; i++) {
unsigned j, ndw = (csc->relocs_bo[i]->base.size + 3) >> 2;
 
ptr = radeon_bo_do_map(csc->relocs_bo[i]);
if (ptr) {
fprintf(dump, "static uint32_t bo_%04d_data[%d] = {\n ", i, ndw);
for (j = 0; j < ndw; j++) {
if (j && !(j % 8)) {
uint32_t offset = (j - 8) << 2;
fprintf(dump, " /* [0x%08x] va[0x%016"PRIx64"] */\n ", offset, offset + csc->relocs_bo[i]->va);
}
fprintf(dump, " 0x%08x,", ptr[j]);
}
fprintf(dump, "};\n\n");
}
}
 
fprintf(dump, "static uint32_t bo_relocs[%d] = {\n", csc->crelocs * 4);
for (i = 0; i < csc->crelocs; i++) {
fprintf(dump, " 0x%08x, 0x%08x, 0x%08x, 0x%08x,\n",
0, csc->relocs[i].read_domains, csc->relocs[i].write_domain, csc->relocs[i].flags);
}
fprintf(dump, "};\n\n");
 
fprintf(dump, "/* cs %d dw */\n", csc->chunks[0].length_dw);
fprintf(dump, "static uint32_t cs[] = {\n");
ptr = csc->buf;
for (i = 0; i < csc->chunks[0].length_dw; i++) {
fprintf(dump, " 0x%08x,\n", ptr[i]);
}
fprintf(dump, "};\n\n");
 
fprintf(dump, "static uint32_t cs_flags[2] = {\n");
fprintf(dump, " 0x%08x,\n", csc->flags[0]);
fprintf(dump, " 0x%08x,\n", csc->flags[1]);
fprintf(dump, "};\n\n");
 
fprintf(dump, "int main(int argc, char *argv[])\n");
fprintf(dump, "{\n");
fprintf(dump, " struct bo *bo[%d];\n", csc->crelocs);
fprintf(dump, " struct ctx ctx;\n");
fprintf(dump, "\n");
fprintf(dump, " ctx_init(&ctx);\n");
fprintf(dump, "\n");
 
for (i = 0; i < csc->crelocs; i++) {
unsigned ndw = (csc->relocs_bo[i]->base.size + 3) >> 2;
uint32_t *ptr;
 
ptr = radeon_bo_do_map(csc->relocs_bo[i]);
if (ptr) {
fprintf(dump, " bo[%d] = bo_new(&ctx, %d, bo_%04d_data, 0x%016"PRIx64", 0x%08x);\n",
i, ndw, i, csc->relocs_bo[i]->va, csc->relocs_bo[i]->base.alignment);
} else {
fprintf(dump, " bo[%d] = bo_new(&ctx, %d, NULL, 0x%016"PRIx64", 0x%08x);\n",
i, ndw, csc->relocs_bo[i]->va, csc->relocs_bo[i]->base.alignment);
}
}
fprintf(dump, "\n");
fprintf(dump, " ctx_cs(&ctx, cs, cs_flags, ARRAY_SIZE(cs), bo, bo_relocs, %d);\n", csc->crelocs);
fprintf(dump, "\n");
fprintf(dump, " fprintf(stderr, \"waiting for cs execution to end ....\\n\");\n");
fprintf(dump, " bo_wait(&ctx, bo[0]);\n");
fprintf(dump, "}\n");
fclose(dump);
}
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_public.h
0,0 → 1,14
#ifndef RADEON_DRM_PUBLIC_H
#define RADEON_DRM_PUBLIC_H
 
#include "pipe/p_defines.h"
 
struct radeon_winsys;
struct pipe_screen;
 
typedef struct pipe_screen *(*radeon_screen_create_t)(struct radeon_winsys *);
 
struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create);
 
#endif
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_surface.c
0,0 → 1,180
/*
* Copyright © 2014 Advanced Micro Devices, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* Authors:
* Marek Olšák <maraeo@gmail.com>
*/
 
#include "radeon_drm_winsys.h"
 
#include <radeon_surface.h>
 
static void surf_level_winsys_to_drm(struct radeon_surface_level *level_drm,
const struct radeon_surf_level *level_ws)
{
level_drm->offset = level_ws->offset;
level_drm->slice_size = level_ws->slice_size;
level_drm->npix_x = level_ws->npix_x;
level_drm->npix_y = level_ws->npix_y;
level_drm->npix_z = level_ws->npix_z;
level_drm->nblk_x = level_ws->nblk_x;
level_drm->nblk_y = level_ws->nblk_y;
level_drm->nblk_z = level_ws->nblk_z;
level_drm->pitch_bytes = level_ws->pitch_bytes;
level_drm->mode = level_ws->mode;
}
 
static void surf_level_drm_to_winsys(struct radeon_surf_level *level_ws,
const struct radeon_surface_level *level_drm)
{
level_ws->offset = level_drm->offset;
level_ws->slice_size = level_drm->slice_size;
level_ws->npix_x = level_drm->npix_x;
level_ws->npix_y = level_drm->npix_y;
level_ws->npix_z = level_drm->npix_z;
level_ws->nblk_x = level_drm->nblk_x;
level_ws->nblk_y = level_drm->nblk_y;
level_ws->nblk_z = level_drm->nblk_z;
level_ws->pitch_bytes = level_drm->pitch_bytes;
level_ws->mode = level_drm->mode;
}
 
static void surf_winsys_to_drm(struct radeon_surface *surf_drm,
const struct radeon_surf *surf_ws)
{
int i;
 
memset(surf_drm, 0, sizeof(*surf_drm));
 
surf_drm->npix_x = surf_ws->npix_x;
surf_drm->npix_y = surf_ws->npix_y;
surf_drm->npix_z = surf_ws->npix_z;
surf_drm->blk_w = surf_ws->blk_w;
surf_drm->blk_h = surf_ws->blk_h;
surf_drm->blk_d = surf_ws->blk_d;
surf_drm->array_size = surf_ws->array_size;
surf_drm->last_level = surf_ws->last_level;
surf_drm->bpe = surf_ws->bpe;
surf_drm->nsamples = surf_ws->nsamples;
surf_drm->flags = surf_ws->flags;
 
surf_drm->bo_size = surf_ws->bo_size;
surf_drm->bo_alignment = surf_ws->bo_alignment;
 
surf_drm->bankw = surf_ws->bankw;
surf_drm->bankh = surf_ws->bankh;
surf_drm->mtilea = surf_ws->mtilea;
surf_drm->tile_split = surf_ws->tile_split;
surf_drm->stencil_tile_split = surf_ws->stencil_tile_split;
surf_drm->stencil_offset = surf_ws->stencil_offset;
 
for (i = 0; i < RADEON_SURF_MAX_LEVEL; i++) {
surf_level_winsys_to_drm(&surf_drm->level[i], &surf_ws->level[i]);
surf_level_winsys_to_drm(&surf_drm->stencil_level[i],
&surf_ws->stencil_level[i]);
 
surf_drm->tiling_index[i] = surf_ws->tiling_index[i];
surf_drm->stencil_tiling_index[i] = surf_ws->stencil_tiling_index[i];
}
}
 
static void surf_drm_to_winsys(struct radeon_surf *surf_ws,
const struct radeon_surface *surf_drm)
{
int i;
 
memset(surf_ws, 0, sizeof(*surf_ws));
 
surf_ws->npix_x = surf_drm->npix_x;
surf_ws->npix_y = surf_drm->npix_y;
surf_ws->npix_z = surf_drm->npix_z;
surf_ws->blk_w = surf_drm->blk_w;
surf_ws->blk_h = surf_drm->blk_h;
surf_ws->blk_d = surf_drm->blk_d;
surf_ws->array_size = surf_drm->array_size;
surf_ws->last_level = surf_drm->last_level;
surf_ws->bpe = surf_drm->bpe;
surf_ws->nsamples = surf_drm->nsamples;
surf_ws->flags = surf_drm->flags;
 
surf_ws->bo_size = surf_drm->bo_size;
surf_ws->bo_alignment = surf_drm->bo_alignment;
 
surf_ws->bankw = surf_drm->bankw;
surf_ws->bankh = surf_drm->bankh;
surf_ws->mtilea = surf_drm->mtilea;
surf_ws->tile_split = surf_drm->tile_split;
surf_ws->stencil_tile_split = surf_drm->stencil_tile_split;
surf_ws->stencil_offset = surf_drm->stencil_offset;
 
for (i = 0; i < RADEON_SURF_MAX_LEVEL; i++) {
surf_level_drm_to_winsys(&surf_ws->level[i], &surf_drm->level[i]);
surf_level_drm_to_winsys(&surf_ws->stencil_level[i],
&surf_drm->stencil_level[i]);
 
surf_ws->tiling_index[i] = surf_drm->tiling_index[i];
surf_ws->stencil_tiling_index[i] = surf_drm->stencil_tiling_index[i];
}
}
 
static int radeon_winsys_surface_init(struct radeon_winsys *rws,
struct radeon_surf *surf_ws)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
struct radeon_surface surf_drm;
int r;
 
surf_winsys_to_drm(&surf_drm, surf_ws);
 
r = radeon_surface_init(ws->surf_man, &surf_drm);
if (r)
return r;
 
surf_drm_to_winsys(surf_ws, &surf_drm);
return 0;
}
 
static int radeon_winsys_surface_best(struct radeon_winsys *rws,
struct radeon_surf *surf_ws)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
struct radeon_surface surf_drm;
int r;
 
surf_winsys_to_drm(&surf_drm, surf_ws);
 
r = radeon_surface_best(ws->surf_man, &surf_drm);
if (r)
return r;
 
surf_drm_to_winsys(surf_ws, &surf_drm);
return 0;
}
 
void radeon_surface_init_functions(struct radeon_drm_winsys *ws)
{
ws->base.surface_init = radeon_winsys_surface_init;
ws->base.surface_best = radeon_winsys_surface_best;
}
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
0,0 → 1,775
/*
* Copyright © 2009 Corbin Simpson
* Copyright © 2011 Marek Olšák <maraeo@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Corbin Simpson <MostAwesomeDude@gmail.com>
* Joakim Sindholt <opensource@zhasha.com>
* Marek Olšák <maraeo@gmail.com>
*/
 
#include "radeon_drm_bo.h"
#include "radeon_drm_cs.h"
#include "radeon_drm_public.h"
 
#include "pipebuffer/pb_bufmgr.h"
#include "util/u_memory.h"
#include "util/u_hash_table.h"
 
#include <xf86drm.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <radeon_surface.h>
 
#ifndef RADEON_INFO_ACTIVE_CU_COUNT
#define RADEON_INFO_ACTIVE_CU_COUNT 0x20
#endif
 
#ifndef RADEON_INFO_CURRENT_GPU_TEMP
#define RADEON_INFO_CURRENT_GPU_TEMP 0x21
#define RADEON_INFO_CURRENT_GPU_SCLK 0x22
#define RADEON_INFO_CURRENT_GPU_MCLK 0x23
#define RADEON_INFO_READ_REG 0x24
#endif
 
static struct util_hash_table *fd_tab = NULL;
pipe_static_mutex(fd_tab_mutex);
 
/* Enable/disable feature access for one command stream.
* If enable == TRUE, return TRUE on success.
* Otherwise, return FALSE.
*
* We basically do the same thing the kernel does, because we have to deal
* with multiple contexts (here command streams) backed by one winsys. */
static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
struct radeon_drm_cs **owner,
pipe_mutex *mutex,
unsigned request, const char *request_name,
boolean enable)
{
struct drm_radeon_info info;
unsigned value = enable ? 1 : 0;
 
memset(&info, 0, sizeof(info));
 
pipe_mutex_lock(*mutex);
 
/* Early exit if we are sure the request will fail. */
if (enable) {
if (*owner) {
pipe_mutex_unlock(*mutex);
return FALSE;
}
} else {
if (*owner != applier) {
pipe_mutex_unlock(*mutex);
return FALSE;
}
}
 
/* Pass through the request to the kernel. */
info.value = (unsigned long)&value;
info.request = request;
if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
&info, sizeof(info)) != 0) {
pipe_mutex_unlock(*mutex);
return FALSE;
}
 
/* Update the rights in the winsys. */
if (enable) {
if (value) {
*owner = applier;
pipe_mutex_unlock(*mutex);
return TRUE;
}
} else {
*owner = NULL;
}
 
pipe_mutex_unlock(*mutex);
return FALSE;
}
 
static boolean radeon_get_drm_value(int fd, unsigned request,
const char *errname, uint32_t *out)
{
struct drm_radeon_info info;
int retval;
 
memset(&info, 0, sizeof(info));
 
info.value = (unsigned long)out;
info.request = request;
 
retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
if (retval) {
if (errname) {
fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
errname, retval);
}
return FALSE;
}
return TRUE;
}
 
/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct radeon_drm_winsys *ws)
{
struct drm_radeon_gem_info gem_info;
int retval;
drmVersionPtr version;
 
memset(&gem_info, 0, sizeof(gem_info));
 
/* We do things in a specific order here.
*
* DRM version first. We need to be sure we're running on a KMS chipset.
* Some features also depend on the DRM version.
*
* Then, the PCI ID. This is essential and should return usable numbers
* for all Radeons. If this fails, we probably got handed an FD for some
* non-Radeon card.
*
* The GEM info is actually bogus on the kernel side, as well as our side
* (see radeon_gem_info_ioctl in radeon_gem.c) but that's alright because
* we don't actually use the info for anything yet.
*
* The GB and Z pipe requests should always succeed, though they might not
* return sensible values for all chipsets; that's alright because
* the pipe drivers already know that.
*/
 
/* Get DRM version. */
version = drmGetVersion(ws->fd);
if (version->version_major != 2 ||
version->version_minor < 3) {
fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
"only compatible with 2.3.x (kernel 2.6.34) or later.\n",
__FUNCTION__,
version->version_major,
version->version_minor,
version->version_patchlevel);
drmFreeVersion(version);
return FALSE;
}
 
ws->info.drm_major = version->version_major;
ws->info.drm_minor = version->version_minor;
ws->info.drm_patchlevel = version->version_patchlevel;
drmFreeVersion(version);
 
/* Get PCI ID. */
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
&ws->info.pci_id))
return FALSE;
 
/* Check PCI ID. */
switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R300; break;
#include "pci_ids/r300_pci_ids.h"
#undef CHIPSET
 
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R600; break;
#include "pci_ids/r600_pci_ids.h"
#undef CHIPSET
 
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_SI; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET
 
default:
fprintf(stderr, "radeon: Invalid PCI ID.\n");
return FALSE;
}
 
switch (ws->info.family) {
default:
case CHIP_UNKNOWN:
fprintf(stderr, "radeon: Unknown family.\n");
return FALSE;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV370:
case CHIP_RV380:
case CHIP_RS400:
case CHIP_RC410:
case CHIP_RS480:
ws->info.chip_class = R300;
break;
case CHIP_R420: /* R4xx-based cores. */
case CHIP_R423:
case CHIP_R430:
case CHIP_R480:
case CHIP_R481:
case CHIP_RV410:
case CHIP_RS600:
case CHIP_RS690:
case CHIP_RS740:
ws->info.chip_class = R400;
break;
case CHIP_RV515: /* R5xx-based cores. */
case CHIP_R520:
case CHIP_RV530:
case CHIP_R580:
case CHIP_RV560:
case CHIP_RV570:
ws->info.chip_class = R500;
break;
case CHIP_R600:
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV670:
case CHIP_RV620:
case CHIP_RV635:
case CHIP_RS780:
case CHIP_RS880:
ws->info.chip_class = R600;
break;
case CHIP_RV770:
case CHIP_RV730:
case CHIP_RV710:
case CHIP_RV740:
ws->info.chip_class = R700;
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
case CHIP_JUNIPER:
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
case CHIP_BARTS:
case CHIP_TURKS:
case CHIP_CAICOS:
ws->info.chip_class = EVERGREEN;
break;
case CHIP_CAYMAN:
case CHIP_ARUBA:
ws->info.chip_class = CAYMAN;
break;
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_VERDE:
case CHIP_OLAND:
case CHIP_HAINAN:
ws->info.chip_class = SI;
break;
case CHIP_BONAIRE:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_HAWAII:
case CHIP_MULLINS:
ws->info.chip_class = CIK;
break;
}
 
/* Check for dma */
ws->info.r600_has_dma = FALSE;
/* DMA is disabled on R700. There is IB corruption and hangs. */
if (ws->info.chip_class >= EVERGREEN && ws->info.drm_minor >= 27) {
ws->info.r600_has_dma = TRUE;
}
 
/* Check for UVD and VCE */
ws->info.has_uvd = FALSE;
ws->info.vce_fw_version = 0x00000000;
if (ws->info.drm_minor >= 32) {
uint32_t value = RADEON_CS_RING_UVD;
if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
"UVD Ring working", &value))
ws->info.has_uvd = value;
 
value = RADEON_CS_RING_VCE;
if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
NULL, &value) && value) {
 
if (radeon_get_drm_value(ws->fd, RADEON_INFO_VCE_FW_VERSION,
"VCE FW version", &value))
ws->info.vce_fw_version = value;
}
}
 
/* Check for userptr support. */
{
struct drm_radeon_gem_userptr args = {0};
 
/* If the ioctl doesn't exist, -EINVAL is returned.
*
* If the ioctl exists, it should return -EACCES
* if RADEON_GEM_USERPTR_READONLY or RADEON_GEM_USERPTR_REGISTER
* aren't set.
*/
ws->info.has_userptr =
drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_USERPTR,
&args, sizeof(args)) == -EACCES;
}
 
/* Get GEM info. */
retval = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_INFO,
&gem_info, sizeof(gem_info));
if (retval) {
fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
retval);
return FALSE;
}
ws->info.gart_size = gem_info.gart_size;
ws->info.vram_size = gem_info.vram_size;
 
/* Get max clock frequency info and convert it to MHz */
radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_SCLK, NULL,
&ws->info.max_sclk);
ws->info.max_sclk /= 1000;
 
radeon_get_drm_value(ws->fd, RADEON_INFO_SI_BACKEND_ENABLED_MASK, NULL,
&ws->info.si_backend_enabled_mask);
 
ws->num_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 
/* Generation-specific queries. */
if (ws->gen == DRV_R300) {
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_GB_PIPES,
"GB pipe count",
&ws->info.r300_num_gb_pipes))
return FALSE;
 
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_Z_PIPES,
"Z pipe count",
&ws->info.r300_num_z_pipes))
return FALSE;
}
else if (ws->gen >= DRV_R600) {
if (ws->info.drm_minor >= 9 &&
!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
"num backends",
&ws->info.r600_num_backends))
return FALSE;
 
/* get the GPU counter frequency, failure is not fatal */
radeon_get_drm_value(ws->fd, RADEON_INFO_CLOCK_CRYSTAL_FREQ, NULL,
&ws->info.r600_clock_crystal_freq);
 
radeon_get_drm_value(ws->fd, RADEON_INFO_TILING_CONFIG, NULL,
&ws->info.r600_tiling_config);
 
if (ws->info.drm_minor >= 11) {
radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_TILE_PIPES, NULL,
&ws->info.r600_num_tile_pipes);
 
if (radeon_get_drm_value(ws->fd, RADEON_INFO_BACKEND_MAP, NULL,
&ws->info.r600_backend_map))
ws->info.r600_backend_map_valid = TRUE;
}
 
ws->info.r600_virtual_address = FALSE;
if (ws->info.drm_minor >= 13) {
uint32_t ib_vm_max_size;
 
ws->info.r600_virtual_address = TRUE;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
&ws->va_start))
ws->info.r600_virtual_address = FALSE;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
&ib_vm_max_size))
ws->info.r600_virtual_address = FALSE;
}
if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", FALSE))
ws->info.r600_virtual_address = FALSE;
}
 
/* Get max pipes; this is only needed for compute shaders. All evergreen+
* chips have at least 2 pipes, so we use 2 as a default. */
ws->info.r600_max_pipes = 2;
radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_PIPES, NULL,
&ws->info.r600_max_pipes);
 
/* All GPUs have at least one compute unit */
ws->info.max_compute_units = 1;
radeon_get_drm_value(ws->fd, RADEON_INFO_ACTIVE_CU_COUNT, NULL,
&ws->info.max_compute_units);
 
radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_SE, NULL,
&ws->info.max_se);
 
if (!ws->info.max_se) {
switch (ws->info.family) {
default:
ws->info.max_se = 1;
break;
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
case CHIP_BARTS:
case CHIP_CAYMAN:
case CHIP_TAHITI:
case CHIP_PITCAIRN:
case CHIP_BONAIRE:
ws->info.max_se = 2;
break;
case CHIP_HAWAII:
ws->info.max_se = 4;
break;
}
}
 
radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_SH_PER_SE, NULL,
&ws->info.max_sh_per_se);
 
radeon_get_drm_value(ws->fd, RADEON_INFO_ACCEL_WORKING2, NULL,
&ws->accel_working2);
if (ws->info.family == CHIP_HAWAII && ws->accel_working2 < 2) {
fprintf(stderr, "radeon: GPU acceleration for Hawaii disabled, "
"returned accel_working2 value %u is smaller than 2. "
"Please install a newer kernel.\n",
ws->accel_working2);
return FALSE;
}
 
if (radeon_get_drm_value(ws->fd, RADEON_INFO_SI_TILE_MODE_ARRAY, NULL,
ws->info.si_tile_mode_array)) {
ws->info.si_tile_mode_array_valid = TRUE;
}
 
if (radeon_get_drm_value(ws->fd, RADEON_INFO_CIK_MACROTILE_MODE_ARRAY, NULL,
ws->info.cik_macrotile_mode_array)) {
ws->info.cik_macrotile_mode_array_valid = TRUE;
}
 
return TRUE;
}
 
static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
 
if (ws->thread) {
ws->kill_thread = 1;
pipe_semaphore_signal(&ws->cs_queued);
pipe_thread_wait(ws->thread);
}
pipe_semaphore_destroy(&ws->cs_queued);
 
pipe_mutex_destroy(ws->hyperz_owner_mutex);
pipe_mutex_destroy(ws->cmask_owner_mutex);
pipe_mutex_destroy(ws->cs_stack_lock);
 
ws->cman->destroy(ws->cman);
ws->kman->destroy(ws->kman);
if (ws->gen >= DRV_R600) {
radeon_surface_manager_free(ws->surf_man);
}
FREE(rws);
}
 
static void radeon_query_info(struct radeon_winsys *rws,
struct radeon_info *info)
{
*info = ((struct radeon_drm_winsys *)rws)->info;
}
 
static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
enum radeon_feature_id fid,
boolean enable)
{
struct radeon_drm_cs *cs = radeon_drm_cs(rcs);
 
switch (fid) {
case RADEON_FID_R300_HYPERZ_ACCESS:
return radeon_set_fd_access(cs, &cs->ws->hyperz_owner,
&cs->ws->hyperz_owner_mutex,
RADEON_INFO_WANT_HYPERZ, "Hyper-Z",
enable);
 
case RADEON_FID_R300_CMASK_ACCESS:
return radeon_set_fd_access(cs, &cs->ws->cmask_owner,
&cs->ws->cmask_owner_mutex,
RADEON_INFO_WANT_CMASK, "AA optimizations",
enable);
}
return FALSE;
}
 
static uint64_t radeon_query_value(struct radeon_winsys *rws,
enum radeon_value_id value)
{
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
uint64_t retval = 0;
 
switch (value) {
case RADEON_REQUESTED_VRAM_MEMORY:
return ws->allocated_vram;
case RADEON_REQUESTED_GTT_MEMORY:
return ws->allocated_gtt;
case RADEON_BUFFER_WAIT_TIME_NS:
return ws->buffer_wait_time;
case RADEON_TIMESTAMP:
if (ws->info.drm_minor < 20 || ws->gen < DRV_R600) {
assert(0);
return 0;
}
 
radeon_get_drm_value(ws->fd, RADEON_INFO_TIMESTAMP, "timestamp",
(uint32_t*)&retval);
return retval;
case RADEON_NUM_CS_FLUSHES:
return ws->num_cs_flushes;
case RADEON_NUM_BYTES_MOVED:
radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BYTES_MOVED,
"num-bytes-moved", (uint32_t*)&retval);
return retval;
case RADEON_VRAM_USAGE:
radeon_get_drm_value(ws->fd, RADEON_INFO_VRAM_USAGE,
"vram-usage", (uint32_t*)&retval);
return retval;
case RADEON_GTT_USAGE:
radeon_get_drm_value(ws->fd, RADEON_INFO_GTT_USAGE,
"gtt-usage", (uint32_t*)&retval);
return retval;
case RADEON_GPU_TEMPERATURE:
radeon_get_drm_value(ws->fd, RADEON_INFO_CURRENT_GPU_TEMP,
"gpu-temp", (uint32_t*)&retval);
return retval;
case RADEON_CURRENT_SCLK:
radeon_get_drm_value(ws->fd, RADEON_INFO_CURRENT_GPU_SCLK,
"current-gpu-sclk", (uint32_t*)&retval);
return retval;
case RADEON_CURRENT_MCLK:
radeon_get_drm_value(ws->fd, RADEON_INFO_CURRENT_GPU_MCLK,
"current-gpu-mclk", (uint32_t*)&retval);
return retval;
}
return 0;
}
 
static void radeon_read_registers(struct radeon_winsys *rws,
unsigned reg_offset,
unsigned num_registers, uint32_t *out)
{
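/* Read num_registers consecutive 32-bit registers starting at reg_offset.
 * Each RADEON_INFO_READ_REG request takes the register offset as input
 * and returns the register value in the same variable. */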
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
unsigned i;
 
for (i = 0; i < num_registers; i++) {
uint32_t reg = reg_offset + i*4;
 
radeon_get_drm_value(ws->fd, RADEON_INFO_READ_REG, "read-reg", &reg);
out[i] = reg;
}
}
 
static unsigned hash_fd(void *key)
{
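/* Hash an fd by the identity of the underlying device node, so that
 * duplicated fds referring to the same DRM device map to the same
 * winsys in fd_tab. */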
int fd = pointer_to_intptr(key);
struct stat stat;
fstat(fd, &stat);
 
return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
}
 
static int compare_fd(void *key1, void *key2)
{
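/* Two fds compare equal (return 0) when they refer to the same device node. */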
int fd1 = pointer_to_intptr(key1);
int fd2 = pointer_to_intptr(key2);
struct stat stat1, stat2;
fstat(fd1, &stat1);
fstat(fd2, &stat2);
 
return stat1.st_dev != stat2.st_dev ||
stat1.st_ino != stat2.st_ino ||
stat1.st_rdev != stat2.st_rdev;
}
 
void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs)
{
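/* Push a command stream onto the submission stack and wake the ioctl
 * thread. If the stack is full, spin until the thread has made room. */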
retry:
pipe_mutex_lock(ws->cs_stack_lock);
if (ws->ncs >= RING_LAST) {
/* no room left for a flush */
pipe_mutex_unlock(ws->cs_stack_lock);
goto retry;
}
ws->cs_stack[ws->ncs++] = cs;
pipe_mutex_unlock(ws->cs_stack_lock);
pipe_semaphore_signal(&ws->cs_queued);
}
 
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
{
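/* Worker thread: wait for a queued command stream, pop the oldest one,
 * submit it with the CS ioctl and signal its flush_completed semaphore.
 * On shutdown, signal any command streams still left in the stack. */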
struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;
struct radeon_drm_cs *cs;
unsigned i;
 
while (1) {
pipe_semaphore_wait(&ws->cs_queued);
if (ws->kill_thread)
break;
 
pipe_mutex_lock(ws->cs_stack_lock);
cs = ws->cs_stack[0];
for (i = 1; i < ws->ncs; i++)
ws->cs_stack[i - 1] = ws->cs_stack[i];
ws->cs_stack[--ws->ncs] = NULL;
pipe_mutex_unlock(ws->cs_stack_lock);
 
if (cs) {
radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);
pipe_semaphore_signal(&cs->flush_completed);
}
}
pipe_mutex_lock(ws->cs_stack_lock);
for (i = 0; i < ws->ncs; i++) {
pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
ws->cs_stack[i] = NULL;
}
ws->ncs = 0;
pipe_mutex_unlock(ws->cs_stack_lock);
return 0;
}
 
/* The RADEON_THREAD environment variable (default: true) controls whether
 * command streams are submitted from a separate thread. */
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);
 
static bool radeon_winsys_unref(struct radeon_winsys *ws)
{
struct radeon_drm_winsys *rws = (struct radeon_drm_winsys*)ws;
bool destroy;
 
/* When the reference counter drops to zero, remove the fd from the table.
* This must happen while the mutex is locked, so that
* radeon_drm_winsys_create in another thread doesn't get the winsys
* from the table when the counter drops to 0. */
pipe_mutex_lock(fd_tab_mutex);
 
destroy = pipe_reference(&rws->reference, NULL);
if (destroy && fd_tab)
util_hash_table_remove(fd_tab, intptr_to_pointer(rws->fd));
 
pipe_mutex_unlock(fd_tab_mutex);
return destroy;
}
 
PUBLIC struct radeon_winsys *
radeon_drm_winsys_create(int fd, radeon_screen_create_t screen_create)
{
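/* Entry point: return the winsys already associated with this fd, or
 * create a new one. fd_tab_mutex is held for the whole lookup/creation,
 * so concurrent callers never see a partially initialized winsys. */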
struct radeon_drm_winsys *ws;
 
pipe_mutex_lock(fd_tab_mutex);
if (!fd_tab) {
fd_tab = util_hash_table_create(hash_fd, compare_fd);
}
 
ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
if (ws) {
pipe_reference(NULL, &ws->reference);
pipe_mutex_unlock(fd_tab_mutex);
return &ws->base;
}
 
ws = CALLOC_STRUCT(radeon_drm_winsys);
if (!ws) {
pipe_mutex_unlock(fd_tab_mutex);
return NULL;
}
 
ws->fd = fd;
 
if (!do_winsys_init(ws))
goto fail;
 
/* Create managers. */
ws->kman = radeon_bomgr_create(ws);
if (!ws->kman)
goto fail;
 
ws->cman = pb_cache_manager_create(ws->kman, 1000000, 2.0f, 0,
MIN2(ws->info.vram_size, ws->info.gart_size));
if (!ws->cman)
goto fail;
 
if (ws->gen >= DRV_R600) {
ws->surf_man = radeon_surface_manager_new(fd);
if (!ws->surf_man)
goto fail;
}
 
/* Initialize the reference count; the caller owns the first reference. */
pipe_reference_init(&ws->reference, 1);
 
/* Set functions. */
ws->base.unref = radeon_winsys_unref;
ws->base.destroy = radeon_winsys_destroy;
ws->base.query_info = radeon_query_info;
ws->base.cs_request_feature = radeon_cs_request_feature;
ws->base.query_value = radeon_query_value;
ws->base.read_registers = radeon_read_registers;
 
radeon_bomgr_init_functions(ws);
radeon_drm_cs_init_functions(ws);
radeon_surface_init_functions(ws);
 
pipe_mutex_init(ws->hyperz_owner_mutex);
pipe_mutex_init(ws->cmask_owner_mutex);
pipe_mutex_init(ws->cs_stack_lock);
 
ws->ncs = 0;
pipe_semaphore_init(&ws->cs_queued, 0);
if (ws->num_cpus > 1 && debug_get_option_thread())
ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);
 
/* Create the screen at the end. The winsys must be initialized
* completely.
*
* Alternatively, we could create the screen based on "ws->gen"
* and link all drivers into one binary blob. */
ws->base.screen = screen_create(&ws->base);
if (!ws->base.screen) {
radeon_winsys_destroy(&ws->base);
pipe_mutex_unlock(fd_tab_mutex);
return NULL;
}
 
util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);
 
/* The mutex must stay locked until the winsys is fully initialized, so
* that other threads attempting to create a winsys from the same fd get a
* fully initialized winsys and not a half-initialized one. */
pipe_mutex_unlock(fd_tab_mutex);
 
return &ws->base;
 
fail:
pipe_mutex_unlock(fd_tab_mutex);
if (ws->cman)
ws->cman->destroy(ws->cman);
if (ws->kman)
ws->kman->destroy(ws->kman);
if (ws->surf_man)
radeon_surface_manager_free(ws->surf_man);
FREE(ws);
return NULL;
}
/contrib/sdk/sources/Mesa/mesa-10.6.0/src/gallium/winsys/radeon/drm/radeon_drm_winsys.h
0,0 → 1,108
/*
* Copyright © 2009 Corbin Simpson
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
* AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors:
* Corbin Simpson <MostAwesomeDude@gmail.com>
*/
#ifndef RADEON_DRM_WINSYS_H
#define RADEON_DRM_WINSYS_H
 
#include "gallium/drivers/radeon/radeon_winsys.h"
#include "os/os_thread.h"
#include <radeon_drm.h>
 
#ifndef DRM_RADEON_GEM_USERPTR
 
#define DRM_RADEON_GEM_USERPTR 0x2d
 
#define RADEON_GEM_USERPTR_READONLY (1 << 0)
#define RADEON_GEM_USERPTR_ANONONLY (1 << 1)
#define RADEON_GEM_USERPTR_VALIDATE (1 << 2)
#define RADEON_GEM_USERPTR_REGISTER (1 << 3)
 
struct drm_radeon_gem_userptr {
uint64_t addr;
uint64_t size;
uint32_t flags;
uint32_t handle;
};
 
#endif
 
struct radeon_drm_cs;
 
enum radeon_generation {
DRV_R300,
DRV_R600,
DRV_SI
};
 
struct radeon_drm_winsys {
struct radeon_winsys base;
struct pipe_reference reference;
 
int fd; /* DRM file descriptor */
int num_cs; /* The number of command streams created. */
uint64_t allocated_vram;
uint64_t allocated_gtt;
uint64_t buffer_wait_time; /* time spent in buffer_wait in ns */
uint64_t num_cs_flushes;
 
enum radeon_generation gen;
struct radeon_info info;
uint32_t va_start; /* start of the GPU virtual address range available for buffers */
uint32_t accel_working2; /* RADEON_INFO_ACCEL_WORKING2 value; Hawaii requires >= 2 */
 
struct pb_manager *kman; /* kernel buffer manager (GEM buffer objects) */
struct pb_manager *cman; /* caching buffer manager layered on top of kman */
struct radeon_surface_manager *surf_man;
 
uint32_t num_cpus; /* Number of CPUs. */
 
struct radeon_drm_cs *hyperz_owner;
pipe_mutex hyperz_owner_mutex;
struct radeon_drm_cs *cmask_owner;
pipe_mutex cmask_owner_mutex;
 
/* command stream submission thread */
pipe_mutex cs_stack_lock;
pipe_semaphore cs_queued;
pipe_thread thread;
int kill_thread; /* set to ask the submission thread to exit */
int ncs; /* number of command streams currently queued */
struct radeon_drm_cs *cs_stack[RING_LAST]; /* command streams awaiting submission */
};
 
static INLINE struct radeon_drm_winsys *
radeon_drm_winsys(struct radeon_winsys *base)
{
return (struct radeon_drm_winsys*)base;
}
 
void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs);
void radeon_surface_init_functions(struct radeon_drm_winsys *ws);
 
#endif