/drivers/ddk/core.S |
---|
18,9 → 18,6 |
.global _DestroyEvent |
.global _DestroyObject |
.global _DiskAdd |
.global _DiskMediaChanged |
.global _FreeKernelSpace |
.global _FreePage |
85,9 → 82,6 |
.def _DestroyEvent; .scl 2; .type 32; .endef |
.def _DestroyObject; .scl 2; .type 32; .endef |
.def _DiskAdd; .scl 2; .type 32; .endef |
.def _DiskMediaChanged; .scl 2; .type 32; .endef |
.def _FreeKernelSpace; .scl 2; .type 32; .endef |
.def _FreePage; .scl 2; .type 32; .endef |
152,9 → 146,6 |
_DestroyEvent: |
_DestroyObject: |
_DiskAdd: |
_DiskMediaChanged: |
_FreeKernelSpace: |
_FreePage: |
223,9 → 214,6 |
.ascii " -export:DestroyEvent" |
.ascii " -export:DestroyObject" |
.ascii " -export:DiskAdd" # stdcall |
.ascii " -export:DiskMediaChanged" # stdcall |
.ascii " -export:FreeKernelSpace" # stdcall |
.ascii " -export:FreePage" # |
/drivers/include/drm/drm_crtc.h |
---|
817,7 → 817,7 |
/* output poll support */ |
bool poll_enabled; |
bool poll_running; |
struct delayed_work output_poll_work; |
// struct delayed_work output_poll_work; |
/* pointers to standard properties */ |
struct list_head property_blob_list; |
/drivers/include/linux/bug.h |
---|
58,8 → 58,6 |
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) |
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \ |
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0)) |
#endif |
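A worked example of the power-of-two check: `(n) & ((n) - 1)` clears the lowest set bit of `n`, so the expression is zero exactly when `n` has a single bit set. With the real `BUILD_BUG_ON` (restored in kernel.h below), a bad constant becomes a negative-size array and the build fails:

```c
/* n = 8  : 1000 & 0111 = 0000 -> power of two, char[1] compiles
 * n = 12 : 1100 & 1011 = 1000 -> condition true, char[-1] breaks build */
static inline void demo_pow2_check(void)
{
    BUILD_BUG_ON_NOT_POWER_OF_2(8);         /* fine */
    /* BUILD_BUG_ON_NOT_POWER_OF_2(12); */  /* would fail to compile */
}
```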
/drivers/include/linux/kernel.h |
---|
87,16 → 87,10 |
return buf; |
} |
enum { |
DUMP_PREFIX_NONE, |
DUMP_PREFIX_ADDRESS, |
DUMP_PREFIX_OFFSET |
}; |
extern int hex_to_bin(char ch); |
extern void hex2bin(u8 *dst, const char *src, size_t count); |
int hex_to_bin(char ch); |
int hex2bin(u8 *dst, const char *src, size_t count); |
//int printk(const char *fmt, ...); |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
341,9 → 335,9 |
#define dev_info(dev, format, arg...) \ |
printk("Info %s " format , __func__, ## arg) |
//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
#define BUILD_BUG_ON(condition) |
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) |
struct page |
{ |
unsigned int addr; |
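For reference, the contract behind the `hex_to_bin()`/`hex2bin()` declarations above matches upstream `lib/hexdump.c`: a digit maps to 0..15 or -1 on error, and `hex2bin()` packs two digits per output byte, returning 0 or -1. A minimal sketch (kernel `u8`/`size_t` types assumed):

```c
int hex_to_bin(char ch)
{
    if (ch >= '0' && ch <= '9')
        return ch - '0';
    ch |= 0x20;                        /* fold ASCII letters to lowercase */
    if (ch >= 'a' && ch <= 'f')
        return ch - 'a' + 10;
    return -1;                         /* not a hex digit */
}

int hex2bin(u8 *dst, const char *src, size_t count)
{
    while (count--) {
        int hi = hex_to_bin(*src++);
        int lo = hex_to_bin(*src++);

        if (hi < 0 || lo < 0)
            return -1;
        *dst++ = (hi << 4) | lo;       /* two hex digits per byte */
    }
    return 0;
}
```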
/drivers/include/linux/wait.h |
---|
172,13 → 172,6 |
struct work_struct work; |
}; |
static inline struct delayed_work *to_delayed_work(struct work_struct *work) |
{ |
return container_of(work, struct delayed_work, work); |
} |
struct workqueue_struct *alloc_workqueue_key(const char *fmt, |
unsigned int flags, int max_active); |
189,13 → 182,6 |
int queue_delayed_work(struct workqueue_struct *wq, |
struct delayed_work *dwork, unsigned long delay); |
#define INIT_WORK(_work, _func) \ |
do { \ |
INIT_LIST_HEAD(&(_work)->entry); \ |
(_work)->func = _func; \ |
} while (0) |
#define INIT_DELAYED_WORK(_work, _func) \ |
do { \ |
INIT_LIST_HEAD(&(_work)->work.entry); \ |
221,7 → 207,5 |
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function) |
#endif |
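The removed `to_delayed_work()` helper is a plain `container_of()` cast: given the embedded `work` member, it recovers the enclosing `struct delayed_work`, which is how a work callback re-arms its own delayed work. A minimal sketch of the pattern (workqueue name and delay are placeholders):

```c
static struct workqueue_struct *demo_wq;   /* assumed created elsewhere */

static void demo_poll_fn(struct work_struct *work)
{
    /* container_of() subtracts offsetof(struct delayed_work, work),
     * turning the member pointer back into its container - exactly
     * what the removed to_delayed_work() helper did. */
    struct delayed_work *dwork =
        container_of(work, struct delayed_work, work);

    /* ... do the periodic work, then re-arm: */
    queue_delayed_work(demo_wq, dwork, HZ);
}
```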
/drivers/include/linux/compiler-gcc4.h |
---|
4,7 → 4,7 |
/* GCC 4.1.[01] miscompiles __weak */ |
#ifdef __KERNEL__ |
# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101 |
# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1 |
# error Your version of gcc miscompiles the __weak directive |
# endif |
#endif |
13,11 → 13,7 |
#define __must_check __attribute__((warn_unused_result)) |
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b) |
#if GCC_VERSION >= 40100 |
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0) |
#endif |
#if GCC_VERSION >= 40300 |
#if __GNUC_MINOR__ >= 3 |
/* Mark functions as cold. gcc will assume any path leading to a call |
to them will be unlikely. This means a lot of manual unlikely()s |
are unnecessary now for any paths leading to the usual suspects |
33,15 → 29,9 |
the kernel context */ |
#define __cold __attribute__((__cold__)) |
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) |
#define __linktime_error(message) __attribute__((__error__(message))) |
#ifndef __CHECKER__ |
# define __compiletime_warning(message) __attribute__((warning(message))) |
# define __compiletime_error(message) __attribute__((error(message))) |
#endif /* __CHECKER__ */ |
#endif /* GCC_VERSION >= 40300 */ |
#if GCC_VERSION >= 40500 |
#if __GNUC_MINOR__ >= 5 |
/* |
* Mark a position in code as unreachable. This can be used to |
* suppress control flow warnings after asm blocks that transfer |
56,9 → 46,10 |
/* Mark a function definition as prohibited from being cloned. */ |
#define __noclone __attribute__((__noclone__)) |
#endif /* GCC_VERSION >= 40500 */ |
#endif |
#endif |
#if GCC_VERSION >= 40600 |
#if __GNUC_MINOR__ >= 6 |
/* |
* Tell the optimizer that something else uses this function or variable. |
*/ |
65,13 → 56,20 |
#define __visible __attribute__((externally_visible)) |
#endif |
#if __GNUC_MINOR__ > 0 |
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) |
#endif |
#if __GNUC_MINOR__ >= 3 && !defined(__CHECKER__) |
#define __compiletime_warning(message) __attribute__((warning(message))) |
#define __compiletime_error(message) __attribute__((error(message))) |
#endif |
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP |
#if GCC_VERSION >= 40400 |
#if __GNUC_MINOR__ >= 4 |
#define __HAVE_BUILTIN_BSWAP32__ |
#define __HAVE_BUILTIN_BSWAP64__ |
#endif |
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) |
#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6) |
#define __HAVE_BUILTIN_BSWAP16__ |
#endif |
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ |
#endif |
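The pattern behind this hunk: compiler.h includes compiler-gcc4.h only when `__GNUC__ == 4`, and `GCC_VERSION` (removed from compiler-gcc.h below) was `__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__`, so the checks reduce to the minor (and, where needed, patchlevel) components:

```c
/* Equivalences used throughout the hunk above, valid only while the
 * including header guarantees __GNUC__ == 4:
 *
 *   GCC_VERSION >= 40300            <=>  __GNUC_MINOR__ >= 3
 *   GCC_VERSION >= 40500            <=>  __GNUC_MINOR__ >= 5
 *   40100 <= GCC_VERSION <= 40101   <=>  __GNUC_MINOR__ == 1 &&
 *                                        __GNUC_PATCHLEVEL__ <= 1
 */
#if __GNUC__ == 4 && __GNUC_MINOR__ >= 3   /* was: GCC_VERSION >= 40300 */
# define DEMO_HAS_COLD 1                   /* hypothetical feature flag */
#endif
```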
/drivers/include/linux/compiler.h |
---|
170,11 → 170,6 |
(typeof(ptr)) (__ptr + (off)); }) |
#endif |
/* Not-quite-unique ID. */ |
#ifndef __UNIQUE_ID |
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__) |
#endif |
#endif /* __KERNEL__ */ |
#endif /* __ASSEMBLY__ */ |
307,36 → 302,10 |
#endif |
#ifndef __compiletime_error |
# define __compiletime_error(message) |
# define __compiletime_error_fallback(condition) \ |
do { ((void)sizeof(char[1 - 2 * condition])); } while (0) |
#else |
# define __compiletime_error_fallback(condition) do { } while (0) |
#endif |
#define __compiletime_assert(condition, msg, prefix, suffix) \ |
do { \ |
bool __cond = !(condition); \ |
extern void prefix ## suffix(void) __compiletime_error(msg); \ |
if (__cond) \ |
prefix ## suffix(); \ |
__compiletime_error_fallback(__cond); \ |
} while (0) |
#define _compiletime_assert(condition, msg, prefix, suffix) \ |
__compiletime_assert(condition, msg, prefix, suffix) |
/** |
* compiletime_assert - break build and emit msg if condition is false |
* @condition: a compile-time constant condition to check |
* @msg: a message to emit if condition is false |
* |
* In tradition of POSIX assert, this macro will break the build if the |
* supplied condition is *false*, emitting the supplied error message if the |
* compiler has support to do so. |
*/ |
#define compiletime_assert(condition, msg) \ |
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) |
#ifndef __linktime_error |
# define __linktime_error(message) |
#endif |
/* |
* Prevent the compiler from merging or refetching accesses. The compiler |
* is also forbidden from reordering successive instances of ACCESS_ONCE(), |
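For context, the `compiletime_assert()` machinery removed above breaks the build when a compile-time condition is false: on gcc >= 4.3 the referenced-but-undefined extern function carries `__compiletime_error(msg)`, and older compilers fall back to the negative-size-array trick. A usage sketch (struct and sizes are hypothetical; kernel `u32` assumed):

```c
struct demo_pkt {
    u32 hdr;
    u32 body[7];
};

static inline void demo_layout_check(void)
{
    /* Compiles to nothing when the condition holds; otherwise the
     * build fails with the message below (or a negative-array error
     * on old gcc). */
    compiletime_assert(sizeof(struct demo_pkt) == 32,
                       "struct demo_pkt must stay 32 bytes");
}
```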
/drivers/include/linux/compiler-gcc.h |
---|
5,9 → 5,6 |
/* |
* Common definitions for all gcc versions go here. |
*/ |
#define GCC_VERSION (__GNUC__ * 10000 \ |
+ __GNUC_MINOR__ * 100 \ |
+ __GNUC_PATCHLEVEL__) |
/* Optimization barrier */ |
/drivers/include/linux/compiler-gcc3.h |
---|
2,22 → 2,22 |
#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead." |
#endif |
#if GCC_VERSION < 30200 |
#if __GNUC_MINOR__ < 2 |
# error Sorry, your compiler is too old - please upgrade it. |
#endif |
#if GCC_VERSION >= 30300 |
#if __GNUC_MINOR__ >= 3 |
# define __used __attribute__((__used__)) |
#else |
# define __used __attribute__((__unused__)) |
#endif |
#if GCC_VERSION >= 30400 |
#if __GNUC_MINOR__ >= 4 |
#define __must_check __attribute__((warn_unused_result)) |
#endif |
#ifdef CONFIG_GCOV_KERNEL |
# if GCC_VERSION < 30400 |
# if __GNUC_MINOR__ < 4 |
# error "GCOV profiling support for gcc versions below 3.4 not included" |
# endif /* __GNUC_MINOR__ */ |
#endif /* CONFIG_GCOV_KERNEL */ |
/drivers/video/drm/drm_fb_helper.c |
---|
52,36 → 52,9 |
* mode setting driver. They can be used mostly independently from the crtc |
* helper functions used by many drivers to implement the kernel mode setting |
* interfaces. |
* |
* Initialization is done as a three-step process with drm_fb_helper_init(), |
* drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config(). |
* Drivers with fancier requirements than the default behaviour can override the |
* second step with their own code. Teardown is done with drm_fb_helper_fini(). |
* |
* At runtime drivers should restore the fbdev console by calling |
* drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They |
* should also notify the fb helper code of updates to the output |
* configuration by calling drm_fb_helper_hotplug_event(). For easier |
* integration with the output polling code in drm_crtc_helper.c the modeset |
* code provides a ->output_poll_changed callback. |
* |
* All other functions exported by the fb helper library can be used to |
* implement the fbdev driver interface by the driver. |
*/ |
/** |
* drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev |
* emulation helper |
* @fb_helper: fbdev initialized with drm_fb_helper_init |
* |
* This function adds all the available connectors for use with the given |
* fb_helper. This is a separate step to allow drivers to freely assign |
* connectors to the fbdev, e.g. if some are reserved for special purposes or |
* not adequate to be used for the fbcon. |
* |
* Since this is part of the initial setup before the fbdev is published, no |
* locking is required. |
*/ |
/* simple single crtc case helper function */ |
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper) |
{ |
struct drm_device *dev = fb_helper->dev; |
137,24 → 110,7 |
} |
static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper) |
{ |
struct drm_device *dev = fb_helper->dev; |
struct drm_crtc *crtc; |
int bound = 0, crtcs_bound = 0; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if (crtc->fb) |
crtcs_bound++; |
if (crtc->fb == fb_helper->fb) |
bound++; |
} |
if (bound < crtcs_bound) |
return false; |
return true; |
} |
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode) |
{ |
struct drm_fb_helper *fb_helper = info->par; |
164,20 → 120,9 |
int i, j; |
/* |
* fbdev->blank can be called from irq context in case of a panic. |
* Since we already have our own special panic handler which will |
* restore the fbdev console mode completely, just bail out early. |
*/ |
/* |
* For each CRTC in this fb, turn the connectors on/off. |
*/ |
drm_modeset_lock_all(dev); |
if (!drm_fb_helper_is_bound(fb_helper)) { |
drm_modeset_unlock_all(dev); |
return; |
} |
mutex_lock(&dev->mode_config.mutex); |
for (i = 0; i < fb_helper->crtc_count; i++) { |
crtc = fb_helper->crtc_info[i].mode_set.crtc; |
192,14 → 137,9 |
dev->mode_config.dpms_property, dpms_mode); |
} |
} |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
} |
/** |
* drm_fb_helper_blank - implementation for ->fb_blank |
* @blank: desired blanking state |
* @info: fbdev registered by the helper |
*/ |
int drm_fb_helper_blank(int blank, struct fb_info *info) |
{ |
switch (blank) { |
243,24 → 183,6 |
kfree(helper->crtc_info); |
} |
/** |
* drm_fb_helper_init - initialize a drm_fb_helper structure |
* @dev: drm device |
* @fb_helper: driver-allocated fbdev helper structure to initialize |
* @crtc_count: maximum number of crtcs to support in this fbdev emulation |
* @max_conn_count: max connector count |
* |
* This allocates the structures for the fbdev helper with the given limits. |
* Note that this won't yet touch the hardware (through the driver interfaces) |
* nor register the fbdev. This is only done in drm_fb_helper_initial_config() |
* to allow driver writers more control over the exact init sequence. |
* |
* Drivers must set fb_helper->funcs before calling |
* drm_fb_helper_initial_config(). |
* |
* RETURNS: |
* Zero if everything went ok, nonzero otherwise. |
*/ |
int drm_fb_helper_init(struct drm_device *dev, |
struct drm_fb_helper *fb_helper, |
int crtc_count, int max_conn_count) |
372,11 → 294,6 |
return 0; |
} |
/** |
* drm_fb_helper_setcmap - implementation for ->fb_setcmap |
* @cmap: cmap to set |
* @info: fbdev registered by the helper |
*/ |
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) |
{ |
struct drm_fb_helper *fb_helper = info->par; |
416,11 → 333,6 |
} |
EXPORT_SYMBOL(drm_fb_helper_setcmap); |
/** |
* drm_fb_helper_check_var - implementation for ->fb_check_var |
* @var: screeninfo to check |
* @info: fbdev registered by the helper |
*/ |
int drm_fb_helper_check_var(struct fb_var_screeninfo *var, |
struct fb_info *info) |
{ |
513,19 → 425,13 |
} |
EXPORT_SYMBOL(drm_fb_helper_check_var); |
/** |
* drm_fb_helper_set_par - implementation for ->fb_set_par |
* @info: fbdev registered by the helper |
* |
* This will let fbcon do the mode init and is called at initialization time by |
* the fbdev core when registering the driver, and later on through the hotplug |
* callback. |
*/ |
/* this will let fbcon do the mode init */ |
int drm_fb_helper_set_par(struct fb_info *info) |
{ |
struct drm_fb_helper *fb_helper = info->par; |
struct drm_device *dev = fb_helper->dev; |
struct fb_var_screeninfo *var = &info->var; |
struct drm_crtc *crtc; |
int ret; |
int i; |
534,29 → 440,25 |
return -EINVAL; |
} |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
for (i = 0; i < fb_helper->crtc_count; i++) { |
ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set); |
crtc = fb_helper->crtc_info[i].mode_set.crtc; |
ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set); |
if (ret) { |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
} |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
if (fb_helper->delayed_hotplug) { |
fb_helper->delayed_hotplug = false; |
drm_fb_helper_hotplug_event(fb_helper); |
// drm_fb_helper_hotplug_event(fb_helper); |
} |
return 0; |
} |
EXPORT_SYMBOL(drm_fb_helper_set_par); |
/** |
* drm_fb_helper_pan_display - implementation for ->fb_pan_display |
* @var: updated screen information |
* @info: fbdev registered by the helper |
*/ |
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, |
struct fb_info *info) |
{ |
567,12 → 469,7 |
int ret = 0; |
int i; |
drm_modeset_lock_all(dev); |
if (!drm_fb_helper_is_bound(fb_helper)) { |
drm_modeset_unlock_all(dev); |
return -EBUSY; |
} |
mutex_lock(&dev->mode_config.mutex); |
for (i = 0; i < fb_helper->crtc_count; i++) { |
crtc = fb_helper->crtc_info[i].mode_set.crtc; |
582,7 → 479,7 |
modeset->y = var->yoffset; |
if (modeset->num_connectors) { |
ret = drm_mode_set_config_internal(modeset); |
ret = crtc->funcs->set_config(modeset); |
if (!ret) { |
info->var.xoffset = var->xoffset; |
info->var.yoffset = var->yoffset; |
589,20 → 486,15 |
} |
} |
} |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
EXPORT_SYMBOL(drm_fb_helper_pan_display); |
/* |
* Allocates the backing storage and sets up the fbdev info structure through |
* the ->fb_probe callback and then registers the fbdev and sets up the panic |
* notifier. |
*/ |
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, |
int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper, |
int preferred_bpp) |
{ |
int ret = 0; |
int new_fb = 0; |
int crtc_count = 0; |
int i; |
struct fb_info *info; |
680,44 → 572,34 |
} |
/* push down into drivers */ |
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); |
if (ret < 0) |
return ret; |
new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes); |
if (new_fb < 0) |
return new_fb; |
info = fb_helper->fbdev; |
/* |
* Set the fb pointer - usually drm_setup_crtcs does this for hotplug |
* events, but at init time drm_setup_crtcs needs to be called before |
* the fb is allocated (since we need to figure out the desired size of |
* the fb before we can allocate it ...). Hence we need to fix things up |
* here again. |
*/ |
/* set the fb pointer */ |
for (i = 0; i < fb_helper->crtc_count; i++) |
if (fb_helper->crtc_info[i].mode_set.num_connectors) |
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb; |
if (new_fb) { |
info->var.pixclock = 0; |
// dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n", |
// info->node, info->fix.id); |
} else { |
drm_fb_helper_set_par(info); |
} |
if (new_fb) |
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list); |
return 0; |
} |
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe); |
/** |
* drm_fb_helper_fill_fix - initializes fixed fbdev information |
* @info: fbdev registered by the helper |
* @pitch: desired pitch |
* @depth: desired depth |
* |
* Helper to fill in the fixed fbdev information useful for a non-accelerated |
* fbdev emulations. Drivers which support acceleration methods which impose |
* additional constraints need to set up their own limits. |
* |
* Drivers should call this (or their equivalent setup code) from their |
* ->fb_probe callback. |
*/ |
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch, |
uint32_t depth) |
{ |
738,20 → 620,6 |
} |
EXPORT_SYMBOL(drm_fb_helper_fill_fix); |
/** |
* drm_fb_helper_fill_var - initializes variable fbdev information |
* @info: fbdev instance to set up |
* @fb_helper: fb helper instance to use as template |
* @fb_width: desired fb width |
* @fb_height: desired fb height |
* |
* Sets up the variable fbdev metainformation from the given fb helper instance |
* and the drm framebuffer allocated in fb_helper->fb. |
* |
* Drivers should call this (or their equivalent setup code) from their |
* ->fb_probe callback after having allocated the fbdev backing |
* storage framebuffer. |
*/ |
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper, |
uint32_t fb_width, uint32_t fb_height) |
{ |
1069,7 → 937,6 |
for (i = 0; i < fb_helper->crtc_count; i++) { |
modeset = &fb_helper->crtc_info[i].mode_set; |
modeset->num_connectors = 0; |
modeset->fb = NULL; |
} |
for (i = 0; i < fb_helper->connector_count; i++) { |
1086,21 → 953,9 |
modeset->mode = drm_mode_duplicate(dev, |
fb_crtc->desired_mode); |
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; |
modeset->fb = fb_helper->fb; |
} |
} |
/* Clear out any old modes if there are no more connected outputs. */ |
for (i = 0; i < fb_helper->crtc_count; i++) { |
modeset = &fb_helper->crtc_info[i].mode_set; |
if (modeset->num_connectors == 0) { |
BUG_ON(modeset->fb); |
BUG_ON(modeset->num_connectors); |
if (modeset->mode) |
drm_mode_destroy(dev, modeset->mode); |
modeset->mode = NULL; |
} |
} |
out: |
kfree(crtcs); |
kfree(modes); |
1108,23 → 963,18 |
} |
/** |
* drm_fb_helper_initial_config - setup a sane initial connector configuration |
* drm_helper_initial_config - setup a sane initial connector configuration |
* @fb_helper: fb_helper device struct |
* @bpp_sel: bpp value to use for the framebuffer configuration |
* |
* LOCKING: |
* Called at init time by the driver to set up the @fb_helper initial |
* configuration, must take the mode config lock. |
* |
* Scans the CRTCs and connectors and tries to put together an initial setup. |
* At the moment, this is a cloned configuration across all heads with |
* a new framebuffer object as the backing store. |
* |
* Note that this also registers the fbdev and so allows userspace to call into |
* the driver through the fbdev interfaces. |
* |
* This function will call down into the ->fb_probe callback to let |
* the driver allocate and initialize the fbdev info structure and the drm |
* framebuffer used to back the fbdev. drm_fb_helper_fill_var() and |
* drm_fb_helper_fill_fix() are provided as helpers to setup simple default |
* values for the fbdev info structure. |
* |
* RETURNS: |
* Zero if everything went ok, nonzero otherwise. |
*/ |
1133,6 → 983,9 |
struct drm_device *dev = fb_helper->dev; |
int count = 0; |
/* disable all the possible outputs/crtcs before entering KMS mode */ |
drm_helper_disable_unused_functions(fb_helper->dev); |
// drm_fb_helper_parse_command_line(fb_helper); |
count = drm_fb_helper_probe_connector_modes(fb_helper, |
1150,22 → 1003,18 |
} |
EXPORT_SYMBOL(drm_fb_helper_initial_config); |
#if 0 |
/** |
* drm_fb_helper_hotplug_event - respond to a hotplug notification by |
* probing all the outputs attached to the fb |
* @fb_helper: the drm_fb_helper |
* |
* LOCKING: |
* Called at runtime, must take mode config lock. |
* |
* Scan the connectors attached to the fb_helper and try to put together a |
* setup after notification of a change in output configuration. |
* |
* Called at runtime, takes the mode config locks to be able to check/change the |
* modeset configuration. Must be run from process context (which usually means |
* either the output polling work or a work item launched from the driver's |
* hotplug interrupt). |
* |
* Note that the driver must ensure that this is only called _after_ the fb has |
* been fully set up, i.e. after the call to drm_fb_helper_initial_config. |
* |
* RETURNS: |
* 0 on success and a non-zero error code otherwise. |
*/ |
1174,14 → 1023,23 |
struct drm_device *dev = fb_helper->dev; |
int count = 0; |
u32 max_width, max_height, bpp_sel; |
int bound = 0, crtcs_bound = 0; |
struct drm_crtc *crtc; |
if (!fb_helper->fb) |
return 0; |
mutex_lock(&fb_helper->dev->mode_config.mutex); |
if (!drm_fb_helper_is_bound(fb_helper)) { |
mutex_lock(&dev->mode_config.mutex); |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if (crtc->fb) |
crtcs_bound++; |
if (crtc->fb == fb_helper->fb) |
bound++; |
} |
if (bound < crtcs_bound) { |
fb_helper->delayed_hotplug = true; |
mutex_unlock(&fb_helper->dev->mode_config.mutex); |
mutex_unlock(&dev->mode_config.mutex); |
return 0; |
} |
DRM_DEBUG_KMS("\n"); |
1192,16 → 1050,13 |
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width, |
max_height); |
mutex_unlock(&fb_helper->dev->mode_config.mutex); |
drm_modeset_lock_all(dev); |
drm_setup_crtcs(fb_helper); |
drm_modeset_unlock_all(dev); |
drm_fb_helper_set_par(fb_helper->fbdev); |
mutex_unlock(&dev->mode_config.mutex); |
return 0; |
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel); |
} |
EXPORT_SYMBOL(drm_fb_helper_hotplug_event); |
#endif |
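The kernel-doc trimmed from the top of this file described fbdev setup as a three-step sequence; a minimal, hedged driver-side sketch of it (crtc/connector counts and bpp are placeholders):

```c
static int demo_fbdev_setup(struct drm_device *dev,
                            struct drm_fb_helper *helper,
                            struct drm_fb_helper_funcs *funcs)
{
    int ret;

    helper->funcs = funcs;             /* must be set before step 3 */

    ret = drm_fb_helper_init(dev, helper, 2, 2);        /* step 1 */
    if (ret)
        return ret;

    drm_fb_helper_single_add_all_connectors(helper);    /* step 2 */

    drm_fb_helper_initial_config(helper, 32);           /* step 3 */
    return 0;                          /* teardown: drm_fb_helper_fini() */
}
```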
/drivers/video/drm/drm_gem.c |
---|
217,9 → 217,6 |
* we may want to use ida for number allocation and a hash table |
* for the pointers, anyway. |
*/ |
if(handle == -2) |
printf("%s handle %d\n", __FUNCTION__, handle); |
spin_lock(&filp->table_lock); |
/* Check if we currently have a reference on the object */ |
260,19 → 257,21 |
int ret; |
/* |
* Get the user-visible handle using idr. Preload and perform |
* allocation under our spinlock. |
* Get the user-visible handle using idr. |
*/ |
idr_preload(GFP_KERNEL); |
again: |
/* ensure there is space available to allocate a handle */ |
if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) |
return -ENOMEM; |
/* do the allocation under our spinlock */ |
spin_lock(&file_priv->table_lock); |
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); |
ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep); |
spin_unlock(&file_priv->table_lock); |
idr_preload_end(); |
if (ret < 0) |
if (ret == -EAGAIN) |
goto again; |
else if (ret) |
return ret; |
*handlep = ret; |
drm_gem_object_handle_reference(obj); |
385,9 → 384,6 |
{ |
struct drm_gem_object *obj; |
if(handle == -2) |
printf("%s handle %d\n", __FUNCTION__, handle); |
spin_lock(&filp->table_lock); |
/* Check if we currently have a reference on the object */ |
443,18 → 439,23 |
if (obj == NULL) |
return -ENOENT; |
idr_preload(GFP_KERNEL); |
again: |
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) { |
ret = -ENOMEM; |
goto err; |
} |
spin_lock(&dev->object_name_lock); |
if (!obj->name) { |
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT); |
obj->name = ret; |
ret = idr_get_new_above(&dev->object_name_idr, obj, 1, |
&obj->name); |
args->name = (uint64_t) obj->name; |
spin_unlock(&dev->object_name_lock); |
idr_preload_end(); |
if (ret < 0) |
if (ret == -EAGAIN) |
goto again; |
else if (ret) |
goto err; |
ret = 0; |
/* Allocate a reference for the name table. */ |
drm_gem_object_reference(obj); |
461,7 → 462,6 |
} else { |
args->name = (uint64_t) obj->name; |
spin_unlock(&dev->object_name_lock); |
idr_preload_end(); |
ret = 0; |
} |
488,9 → 488,6 |
if (!(dev->driver->driver_features & DRIVER_GEM)) |
return -ENODEV; |
if(handle == -2) |
printf("%s handle %d\n", __FUNCTION__, handle); |
spin_lock(&dev->object_name_lock); |
obj = idr_find(&dev->object_name_idr, (int) args->name); |
if (obj) |
552,6 → 549,8 |
{ |
idr_for_each(&file_private->object_idr, |
&drm_gem_object_release_handle, file_private); |
idr_remove_all(&file_private->object_idr); |
idr_destroy(&file_private->object_idr); |
} |
#endif |
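The drm_gem.c hunks above revert from the 3.9-era `idr_preload()`/`idr_alloc()` API to the older preload-and-retry idiom, whose general shape is (lock and idr belong to the caller):

```c
/* Sketch of the pre-3.9 idr idiom the hunks above restore: preallocate
 * outside the lock, allocate under it, retry on -EAGAIN. */
static int demo_assign_handle(struct idr *idr, spinlock_t *lock,
                              void *obj, int *handlep)
{
    int ret;

again:
    if (idr_pre_get(idr, GFP_KERNEL) == 0)
        return -ENOMEM;                 /* could not preallocate a layer */

    spin_lock(lock);
    ret = idr_get_new_above(idr, obj, 1, handlep);
    spin_unlock(lock);

    if (ret == -EAGAIN)
        goto again;                     /* preallocation consumed; retry */
    return ret;
}
```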
/drivers/video/drm/i915/i915_dma.c |
---|
997,12 → 997,6 |
case I915_PARAM_HAS_PINNED_BATCHES: |
value = 1; |
break; |
case I915_PARAM_HAS_EXEC_NO_RELOC: |
value = 1; |
break; |
case I915_PARAM_HAS_EXEC_HANDLE_LUT: |
value = 1; |
break; |
default: |
DRM_DEBUG_DRIVER("Unknown parameter %d\n", |
param->param); |
1057,7 → 1051,54 |
#endif |
static int i915_set_status_page(struct drm_device *dev, void *data, |
struct drm_file *file_priv) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
drm_i915_hws_addr_t *hws = data; |
struct intel_ring_buffer *ring; |
if (drm_core_check_feature(dev, DRIVER_MODESET)) |
return -ENODEV; |
if (!I915_NEED_GFX_HWS(dev)) |
return -EINVAL; |
if (!dev_priv) { |
DRM_ERROR("called with no initialization\n"); |
return -EINVAL; |
} |
if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
WARN(1, "tried to set status page when mode setting active\n"); |
return 0; |
} |
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr); |
ring = LP_RING(dev_priv); |
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12); |
dev_priv->dri1.gfx_hws_cpu_addr = |
ioremap(dev_priv->mm.gtt_base_addr + hws->addr, 4096); |
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) { |
i915_dma_cleanup(dev); |
ring->status_page.gfx_addr = 0; |
DRM_ERROR("can not ioremap virtual address for" |
" G33 hw status page\n"); |
return -ENOMEM; |
} |
memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE); |
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr); |
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n", |
ring->status_page.gfx_addr); |
DRM_DEBUG_DRIVER("load hws at %p\n", |
ring->status_page.page_addr); |
return 0; |
} |
static int i915_get_bridge_dev(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
1159,21 → 1200,18 |
if (ret) |
goto cleanup_vga_switcheroo; |
ret = drm_irq_install(dev); |
if (ret) |
goto cleanup_gem_stolen; |
/* Important: The output setup functions called by modeset_init need |
* working irqs for e.g. gmbus and dp aux transfers. */ |
intel_modeset_init(dev); |
ret = i915_gem_init(dev); |
if (ret) |
goto cleanup_irq; |
goto cleanup_gem_stolen; |
intel_modeset_gem_init(dev); |
ret = drm_irq_install(dev); |
if (ret) |
goto cleanup_gem; |
/* Always safe in the mode setting case. */ |
/* FIXME: do pre/post-mode set stuff in core KMS code */ |
dev->vblank_disable_allowed = 1; |
1180,40 → 1218,22 |
ret = intel_fbdev_init(dev); |
if (ret) |
goto cleanup_gem; |
goto cleanup_irq; |
/* Only enable hotplug handling once the fbdev is fully set up. */ |
intel_hpd_init(dev); |
// drm_kms_helper_poll_init(dev); |
/* |
* Some ports require correctly set-up hpd registers for detection to |
* work properly (leading to ghost connected connector status), e.g. VGA |
* on gm45. Hence we can only set up the initial fbdev config after hpd |
* irqs are fully enabled. Now we should scan for the initial config |
* only once hotplug handling is enabled, but due to screwed-up locking |
* around kms/fbdev init we can't protect the fbdev initial config |
* scanning against hotplug events. Hence do this first and ignore the |
* tiny window where we will lose hotplug notifications. |
*/ |
intel_fbdev_initial_config(dev); |
/* Only enable hotplug handling once the fbdev is fully set up. */ |
dev_priv->enable_hotplug_processing = true; |
drm_kms_helper_poll_init(dev); |
/* We're off and running w/KMS */ |
dev_priv->mm.suspended = 0; |
return 0; |
cleanup_gem: |
mutex_lock(&dev->struct_mutex); |
i915_gem_cleanup_ringbuffer(dev); |
mutex_unlock(&dev->struct_mutex); |
i915_gem_cleanup_aliasing_ppgtt(dev); |
cleanup_irq: |
// drm_irq_uninstall(dev); |
cleanup_gem: |
// mutex_lock(&dev->struct_mutex); |
// i915_gem_cleanup_ringbuffer(dev); |
// mutex_unlock(&dev->struct_mutex); |
// i915_gem_cleanup_aliasing_ppgtt(dev); |
cleanup_gem_stolen: |
// i915_gem_cleanup_stolen(dev); |
cleanup_vga_switcheroo: |
1316,7 → 1336,8 |
goto put_gmch; |
} |
aperture_size = dev_priv->gtt.mappable_end; |
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr; |
1368,12 → 1389,11 |
*/ |
spin_lock_init(&dev_priv->irq_lock); |
spin_lock_init(&dev_priv->gpu_error.lock); |
spin_lock_init(&dev_priv->error_lock); |
spin_lock_init(&dev_priv->rps.lock); |
mutex_init(&dev_priv->dpio_lock); |
spin_lock_init(&dev_priv->dpio_lock); |
mutex_init(&dev_priv->rps.hw_lock); |
mutex_init(&dev_priv->modeset_restore_lock); |
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) |
dev_priv->num_pipe = 3; |
1424,7 → 1444,7 |
out_rmmap: |
pci_iounmap(dev->pdev, dev_priv->regs); |
put_gmch: |
// dev_priv->gtt.gtt_remove(dev); |
// intel_gmch_remove(); |
put_bridge: |
// pci_dev_put(dev_priv->bridge_dev); |
free_priv: |
1456,11 → 1476,11 |
/* Cancel the retire work handler, which should be idle now. */ |
cancel_delayed_work_sync(&dev_priv->mm.retire_work); |
io_mapping_free(dev_priv->gtt.mappable); |
io_mapping_free(dev_priv->mm.gtt_mapping); |
if (dev_priv->mm.gtt_mtrr >= 0) { |
mtrr_del(dev_priv->mm.gtt_mtrr, |
dev_priv->gtt.mappable_base, |
dev_priv->gtt.mappable_end); |
dev_priv->mm.gtt_base_addr, |
dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE); |
dev_priv->mm.gtt_mtrr = -1; |
} |
1486,8 → 1506,8 |
} |
/* Free error state after interrupts are fully disabled. */ |
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
cancel_work_sync(&dev_priv->gpu_error.work); |
del_timer_sync(&dev_priv->hangcheck_timer); |
cancel_work_sync(&dev_priv->error_work); |
i915_destroy_error_state(dev); |
if (dev->pdev->msi_enabled) |
1506,7 → 1526,10 |
mutex_unlock(&dev->struct_mutex); |
i915_gem_cleanup_aliasing_ppgtt(dev); |
i915_gem_cleanup_stolen(dev); |
drm_mm_takedown(&dev_priv->mm.stolen); |
intel_cleanup_overlay(dev); |
if (!I915_NEED_GFX_HWS(dev)) |
i915_free_hws(dev); |
} |
1518,11 → 1541,7 |
intel_teardown_mchbar(dev); |
destroy_workqueue(dev_priv->wq); |
pm_qos_remove_request(&dev_priv->pm_qos); |
if (dev_priv->slab) |
kmem_cache_destroy(dev_priv->slab); |
pci_dev_put(dev_priv->bridge_dev); |
kfree(dev->dev_private); |
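The load-path reshuffle above (irq install moved before GEM init) also reorders the `cleanup_*` error labels; the invariant is that each label undoes only what succeeded before the failing step, in reverse order. A generic sketch with hypothetical names:

```c
static int demo_init_irq(void) { return 0; }   /* hypothetical stubs */
static int demo_init_gem(void) { return 0; }
static void demo_fini_irq(void) { }

static int demo_driver_load(void)
{
    int ret;

    ret = demo_init_irq();          /* now precedes GEM init */
    if (ret)
        return ret;

    ret = demo_init_gem();
    if (ret)
        goto cleanup_irq;           /* unwind in reverse order */

    return 0;

cleanup_irq:
    demo_fini_irq();
    return ret;
}
```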
/drivers/video/drm/i915/i915_gem.c |
---|
118,12 → 118,14 |
} |
static int |
i915_gem_wait_for_error(struct i915_gpu_error *error) |
i915_gem_wait_for_error(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct completion *x = &dev_priv->error_completion; |
unsigned long flags; |
int ret; |
#define EXIT_COND (!i915_reset_in_progress(error)) |
if (EXIT_COND) |
if (!atomic_read(&dev_priv->mm.wedged)) |
return 0; |
#if 0 |
/* |
131,9 → 133,7 |
* userspace. If it takes that long something really bad is going on and |
* we should simply try to bail out and fail as gracefully as possible. |
*/ |
ret = wait_event_interruptible_timeout(error->reset_queue, |
EXIT_COND, |
10*HZ); |
ret = wait_for_completion_interruptible_timeout(x, 10*HZ); |
if (ret == 0) { |
DRM_ERROR("Timed out waiting for the gpu reset to complete\n"); |
return -EIO; |
141,8 → 141,17 |
return ret; |
} |
if (atomic_read(&dev_priv->mm.wedged)) { |
/* GPU is hung, bump the completion count to account for |
* the token we just consumed so that we never hit zero and |
* end up waiting upon a subsequent completion event that |
* will never happen. |
*/ |
spin_lock_irqsave(&x->wait.lock, flags); |
x->done++; |
spin_unlock_irqrestore(&x->wait.lock, flags); |
} |
#endif |
#undef EXIT_COND |
return 0; |
} |
149,16 → 158,13 |
int i915_mutex_lock_interruptible(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
ret = i915_gem_wait_for_error(&dev_priv->gpu_error); |
ret = i915_gem_wait_for_error(dev); |
if (ret) |
return ret; |
ret = mutex_lock_interruptible(&dev->struct_mutex); |
if (ret) |
return ret; |
mutex_lock(&dev->struct_mutex); |
WARN_ON(i915_verify_lists(dev)); |
return 0; |
177,7 → 183,6 |
i915_gem_init_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_gem_init *args = data; |
if (drm_core_check_feature(dev, DRIVER_MODESET)) |
192,9 → 197,8 |
return -ENODEV; |
mutex_lock(&dev->struct_mutex); |
i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end, |
args->gtt_end); |
dev_priv->gtt.mappable_end = args->gtt_end; |
i915_gem_init_global_gtt(dev, args->gtt_start, |
args->gtt_end, args->gtt_end); |
mutex_unlock(&dev->struct_mutex); |
return 0; |
217,24 → 221,12 |
pinned += obj->gtt_space->size; |
mutex_unlock(&dev->struct_mutex); |
args->aper_size = dev_priv->gtt.total; |
args->aper_size = dev_priv->mm.gtt_total; |
args->aper_available_size = args->aper_size - pinned; |
return 0; |
} |
void *i915_gem_object_alloc(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
return kmalloc(sizeof(struct drm_i915_gem_object), 0); |
} |
void i915_gem_object_free(struct drm_i915_gem_object *obj) |
{ |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
kfree(obj); |
} |
static int |
i915_gem_create(struct drm_file *file, |
struct drm_device *dev, |
305,7 → 297,13 |
args->size, &args->handle); |
} |
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
{ |
drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
obj->tiling_mode != I915_TILING_NONE; |
} |
#if 0 |
static inline int |
448,6 → 446,7 |
loff_t offset; |
int shmem_page_offset, page_length, ret = 0; |
int obj_do_bit17_swizzling, page_do_bit17_swizzling; |
int hit_slowpath = 0; |
int prefaulted = 0; |
int needs_clflush = 0; |
struct scatterlist *sg; |
509,6 → 508,7 |
if (ret == 0) |
goto next_page; |
hit_slowpath = 1; |
mutex_unlock(&dev->struct_mutex); |
if (!prefaulted) { |
541,6 → 541,12 |
out: |
i915_gem_object_unpin_pages(obj); |
if (hit_slowpath) { |
/* Fixup: Kill any reinstated backing storage pages */ |
if (obj->madv == __I915_MADV_PURGED) |
i915_gem_object_truncate(obj); |
} |
return ret; |
} |
882,13 → 888,12 |
i915_gem_object_unpin_pages(obj); |
if (hit_slowpath) { |
/* |
* Fixup: Flush cpu caches in case we didn't flush the dirty |
* cachelines in-line while writing and the object moved |
* out of the cpu write domain while we've dropped the lock. |
*/ |
if (!needs_clflush_after && |
obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
/* Fixup: Kill any reinstated backing storage pages */ |
if (obj->madv == __I915_MADV_PURGED) |
i915_gem_object_truncate(obj); |
/* and flush dirty cachelines in case the object isn't in the cpu write |
* domain anymore. */ |
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) { |
i915_gem_clflush_object(obj); |
i915_gem_chipset_flush(dev); |
} |
913,12 → 918,6 |
struct drm_i915_gem_object *obj; |
int ret; |
if(args->handle == -2) |
{ |
printf("%s handle %d\n", __FUNCTION__, args->handle); |
return 0; |
} |
if (args->size == 0) |
return 0; |
981,17 → 980,26 |
} |
int |
i915_gem_check_wedge(struct i915_gpu_error *error, |
i915_gem_check_wedge(struct drm_i915_private *dev_priv, |
bool interruptible) |
{ |
if (i915_reset_in_progress(error)) { |
if (atomic_read(&dev_priv->mm.wedged)) { |
struct completion *x = &dev_priv->error_completion; |
bool recovery_complete; |
unsigned long flags; |
/* Give the error handler a chance to run. */ |
spin_lock_irqsave(&x->wait.lock, flags); |
recovery_complete = x->done > 0; |
spin_unlock_irqrestore(&x->wait.lock, flags); |
/* Non-interruptible callers can't handle -EAGAIN, hence return |
* -EIO unconditionally for these. */ |
if (!interruptible) |
return -EIO; |
/* Recovery complete, but the reset failed ... */ |
if (i915_terminally_wedged(error)) |
/* Recovery complete, but still wedged means reset failure. */ |
if (recovery_complete) |
return -EIO; |
return -EAGAIN; |
1022,22 → 1030,13 |
* __wait_seqno - wait until execution of seqno has finished |
* @ring: the ring expected to report seqno |
* @seqno: duh! |
* @reset_counter: reset sequence associated with the given seqno |
* @interruptible: do an interruptible wait (normally yes) |
* @timeout: in - how long to wait (NULL forever); out - how much time remaining |
* |
* Note: It is of utmost importance that the passed in seqno and reset_counter |
* values have been read by the caller in an smp safe manner. Where read-side |
* locks are involved, it is sufficient to read the reset_counter before |
* unlocking the lock that protects the seqno. For lockless tricks, the |
* reset_counter _must_ be read before, and an appropriate smp_rmb must be |
* inserted. |
* |
* Returns 0 if the seqno was found within the allotted time. Else returns the |
* errno with remaining time filled in timeout argument. |
*/ |
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, |
unsigned reset_counter, |
bool interruptible, struct timespec *timeout) |
{ |
drm_i915_private_t *dev_priv = ring->dev->dev_private; |
1067,8 → 1066,7 |
#define EXIT_COND \ |
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \ |
i915_reset_in_progress(&dev_priv->gpu_error) || \ |
reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) |
atomic_read(&dev_priv->mm.wedged)) |
do { |
if (interruptible) |
end = wait_event_interruptible_timeout(ring->irq_queue, |
1078,14 → 1076,7 |
end = wait_event_timeout(ring->irq_queue, EXIT_COND, |
timeout_jiffies); |
/* We need to check whether any gpu reset happened in between |
* the caller grabbing the seqno and now ... */ |
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) |
end = -EAGAIN; |
/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly |
* gone. */ |
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); |
ret = i915_gem_check_wedge(dev_priv, interruptible); |
if (ret) |
end = ret; |
} while (end == 0 && wait_forever); |
1131,7 → 1122,7 |
BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
BUG_ON(seqno == 0); |
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible); |
ret = i915_gem_check_wedge(dev_priv, interruptible); |
if (ret) |
return ret; |
1139,9 → 1130,7 |
if (ret) |
return ret; |
return __wait_seqno(ring, seqno, |
atomic_read(&dev_priv->gpu_error.reset_counter), |
interruptible, NULL); |
return __wait_seqno(ring, seqno, interruptible, NULL); |
} |
/** |
1188,7 → 1177,6 |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring = obj->ring; |
unsigned reset_counter; |
u32 seqno; |
int ret; |
1199,7 → 1187,7 |
if (seqno == 0) |
return 0; |
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true); |
ret = i915_gem_check_wedge(dev_priv, true); |
if (ret) |
return ret; |
1207,9 → 1195,8 |
if (ret) |
return ret; |
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
mutex_unlock(&dev->struct_mutex); |
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); |
ret = __wait_seqno(ring, seqno, true, NULL); |
mutex_lock(&dev->struct_mutex); |
i915_gem_retire_requests_ring(ring); |
1240,13 → 1227,6 |
uint32_t write_domain = args->write_domain; |
int ret; |
if(args->handle == -2) |
{ |
printf("%s handle %d\n", __FUNCTION__, args->handle); |
return 0; |
} |
/* Only handle setting domains to types used by the CPU. */ |
if (write_domain & I915_GEM_GPU_DOMAINS) |
return -EINVAL; |
1318,12 → 1298,6 |
struct drm_gem_object *obj; |
unsigned long addr = 0; |
if(args->handle == -2) |
{ |
printf("%s handle %d\n", __FUNCTION__, args->handle); |
return 0; |
} |
obj = drm_gem_object_lookup(dev, file, args->handle); |
if (obj == NULL) |
return -ENOENT; |
1390,7 → 1364,7 |
obj->fault_mappable = false; |
} |
uint32_t |
static uint32_t |
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode) |
{ |
uint32_t gtt_size; |
1418,15 → 1392,16 |
* Return the required GTT alignment for an object, taking into account |
* potential fence register mapping. |
*/ |
uint32_t |
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, |
int tiling_mode, bool fenced) |
static uint32_t |
i915_gem_get_gtt_alignment(struct drm_device *dev, |
uint32_t size, |
int tiling_mode) |
{ |
/* |
* Minimum alignment is 4k (GTT page size), but might be greater |
* if a fence register is needed for the object. |
*/ |
if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) || |
if (INTEL_INFO(dev)->gen >= 4 || |
tiling_mode == I915_TILING_NONE) |
return 4096; |
1466,104 → 1441,6 |
return i915_gem_get_gtt_size(dev, size, tiling_mode); |
} |
int |
i915_gem_mmap_gtt(struct drm_file *file, |
struct drm_device *dev, |
uint32_t handle, |
uint64_t *offset) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_gem_object *obj; |
unsigned long pfn; |
char *mem, *ptr; |
int ret; |
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
return ret; |
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
if (&obj->base == NULL) { |
ret = -ENOENT; |
goto unlock; |
} |
if (obj->base.size > dev_priv->gtt.mappable_end) { |
ret = -E2BIG; |
goto out; |
} |
if (obj->madv != I915_MADV_WILLNEED) { |
DRM_ERROR("Attempting to mmap a purgeable buffer\n"); |
ret = -EINVAL; |
goto out; |
} |
/* Now bind it into the GTT if needed */ |
ret = i915_gem_object_pin(obj, 0, true, false); |
if (ret) |
goto out; |
ret = i915_gem_object_set_to_gtt_domain(obj, 1); |
if (ret) |
goto unpin; |
ret = i915_gem_object_get_fence(obj); |
if (ret) |
goto unpin; |
obj->fault_mappable = true; |
pfn = dev_priv->gtt.mappable_base + obj->gtt_offset; |
/* Finally, remap it using the new GTT offset */ |
mem = UserAlloc(obj->base.size); |
if(unlikely(mem == NULL)) |
{ |
ret = -ENOMEM; |
goto unpin; |
} |
for(ptr = mem; ptr < mem + obj->base.size; ptr+= 4096, pfn+= 4096) |
MapPage(ptr, pfn, PG_SHARED|PG_UW); |
unpin: |
i915_gem_object_unpin(obj); |
*offset = (u64)mem; |
out: |
drm_gem_object_unreference(&obj->base); |
unlock: |
mutex_unlock(&dev->struct_mutex); |
return ret; |
} |
/** |
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing |
* @dev: DRM device |
* @data: GTT mapping ioctl data |
* @file: GEM object info |
* |
* Simply returns the fake offset to userspace so it can mmap it. |
* The mmap call will end up in drm_gem_mmap(), which will set things |
* up so we can get faults in the handler above. |
* |
* The fault handler will take care of binding the object into the GTT |
* (since it may have been evicted to make room for something), allocating |
* a fence register, and mapping the appropriate aperture address into |
* userspace. |
*/ |
int |
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file) |
{ |
struct drm_i915_gem_mmap_gtt *args = data; |
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset); |
} |
/* Immediately discard the backing storage */ |
static void |
i915_gem_object_truncate(struct drm_i915_gem_object *obj) |
1627,7 → 1504,7 |
kfree(obj->pages); |
} |
int |
static int |
i915_gem_object_put_pages(struct drm_i915_gem_object *obj) |
{ |
const struct drm_i915_gem_object_ops *ops = obj->ops; |
1792,6 → 1669,9 |
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
BUG_ON(!obj->active); |
if (obj->pin_count) /* are we a framebuffer? */ |
intel_mark_fb_idle(obj); |
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
list_del_init(&obj->ring_list); |
1811,24 → 1691,30 |
} |
static int |
i915_gem_init_seqno(struct drm_device *dev, u32 seqno) |
i915_gem_handle_seqno_wrap(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_ring_buffer *ring; |
int ret, i, j; |
/* Carefully retire all requests without writing to the rings */ |
/* The hardware uses various monotonic 32-bit counters; if we |
* detect that they will wrap around we need to idle the GPU |
* and reset those counters. |
*/ |
ret = 0; |
for_each_ring(ring, dev_priv, i) { |
ret = intel_ring_idle(ring); |
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) |
ret |= ring->sync_seqno[j] != 0; |
} |
if (ret == 0) |
return ret; |
ret = i915_gpu_idle(dev); |
if (ret) |
return ret; |
} |
i915_gem_retire_requests(dev); |
/* Finally reset hw state */ |
for_each_ring(ring, dev_priv, i) { |
intel_ring_init_seqno(ring, seqno); |
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++) |
ring->sync_seqno[j] = 0; |
} |
1836,32 → 1722,6 |
return 0; |
} |
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
if (seqno == 0) |
return -EINVAL; |
/* HWS page needs to be set less than what we |
* will inject to ring |
*/ |
ret = i915_gem_init_seqno(dev, seqno - 1); |
if (ret) |
return ret; |
/* Carefully set the last_seqno value so that wrap |
* detection still works |
*/ |
dev_priv->next_seqno = seqno; |
dev_priv->last_seqno = seqno - 1; |
if (dev_priv->last_seqno == 0) |
dev_priv->last_seqno--; |
return 0; |
} |
int |
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) |
{ |
1869,7 → 1729,7 |
/* reserve 0 for non-seqno */ |
if (dev_priv->next_seqno == 0) { |
int ret = i915_gem_init_seqno(dev, 0); |
int ret = i915_gem_handle_seqno_wrap(dev); |
if (ret) |
return ret; |
1876,7 → 1736,7 |
dev_priv->next_seqno = 1; |
} |
*seqno = dev_priv->last_seqno = dev_priv->next_seqno++; |
*seqno = dev_priv->next_seqno++; |
return 0; |
} |
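Context for the seqno-wrap hunks above: request seqnos are compared with signed 32-bit arithmetic, which is only correct while two values are less than 2^31 apart, hence the GPU is idled and the counters reset before they wrap. The comparison helper (as in i915_drv.h):

```c
static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
    /* The signed difference handles one wraparound gracefully, but
     * breaks once seq1 and seq2 drift 2^31 apart - the case the code
     * above avoids by resetting the counters early. */
    return (int32_t)(seq1 - seq2) >= 0;
}
```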
2266,6 → 2126,9 |
{ |
u32 old_write_domain, old_read_domains; |
/* Act a barrier for all accesses through the GTT */ |
mb(); |
/* Force a pagefault for domain tracking on next user access */ |
// i915_gem_release_mmap(obj); |
2272,9 → 2135,6 |
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) |
return; |
/* Wait for any direct GTT access to complete */ |
mb(); |
old_read_domains = obj->base.read_domains; |
old_write_domain = obj->base.write_domain; |
2293,7 → 2153,7 |
i915_gem_object_unbind(struct drm_i915_gem_object *obj) |
{ |
drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
int ret; |
int ret = 0; |
if(obj == get_fb_obj()) |
return 0; |
2363,22 → 2223,37 |
return 0; |
} |
static void i965_write_fence_reg(struct drm_device *dev, int reg, |
static void sandybridge_write_fence_reg(struct drm_device *dev, int reg, |
struct drm_i915_gem_object *obj) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
int fence_reg; |
int fence_pitch_shift; |
uint64_t val; |
if (INTEL_INFO(dev)->gen >= 6) { |
fence_reg = FENCE_REG_SANDYBRIDGE_0; |
fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; |
} else { |
fence_reg = FENCE_REG_965_0; |
fence_pitch_shift = I965_FENCE_PITCH_SHIFT; |
if (obj) { |
u32 size = obj->gtt_space->size; |
val = (uint64_t)((obj->gtt_offset + size - 4096) & |
0xfffff000) << 32; |
val |= obj->gtt_offset & 0xfffff000; |
val |= (uint64_t)((obj->stride / 128) - 1) << |
SANDYBRIDGE_FENCE_PITCH_SHIFT; |
if (obj->tiling_mode == I915_TILING_Y) |
val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
val |= I965_FENCE_REG_VALID; |
} else |
val = 0; |
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val); |
POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8); |
} |
static void i965_write_fence_reg(struct drm_device *dev, int reg, |
struct drm_i915_gem_object *obj) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
uint64_t val; |
if (obj) { |
u32 size = obj->gtt_space->size; |
2385,7 → 2260,7 |
val = (uint64_t)((obj->gtt_offset + size - 4096) & |
0xfffff000) << 32; |
val |= obj->gtt_offset & 0xfffff000; |
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift; |
val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; |
if (obj->tiling_mode == I915_TILING_Y) |
val |= 1 << I965_FENCE_TILING_Y_SHIFT; |
val |= I965_FENCE_REG_VALID; |
2392,9 → 2267,8 |
} else |
val = 0; |
fence_reg += reg * 8; |
I915_WRITE64(fence_reg, val); |
POSTING_READ(fence_reg); |
I915_WRITE64(FENCE_REG_965_0 + reg * 8, val); |
POSTING_READ(FENCE_REG_965_0 + reg * 8); |
} |
static void i915_write_fence_reg(struct drm_device *dev, int reg, |
2473,37 → 2347,18 |
POSTING_READ(FENCE_REG_830_0 + reg * 4); |
} |
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj) |
{ |
return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT; |
} |
static void i915_gem_write_fence(struct drm_device *dev, int reg, |
struct drm_i915_gem_object *obj) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
/* Ensure that all CPU reads are completed before installing a fence |
* and all writes before removing the fence. |
*/ |
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj)) |
mb(); |
switch (INTEL_INFO(dev)->gen) { |
case 7: |
case 6: |
case 6: sandybridge_write_fence_reg(dev, reg, obj); break; |
case 5: |
case 4: i965_write_fence_reg(dev, reg, obj); break; |
case 3: i915_write_fence_reg(dev, reg, obj); break; |
case 2: i830_write_fence_reg(dev, reg, obj); break; |
default: BUG(); |
default: break; |
} |
/* And similarly be paranoid that no direct access to this region |
* is reordered to before the fence is installed. |
*/ |
if (i915_gem_object_needs_mb(obj)) |
mb(); |
} |
static inline int fence_number(struct drm_i915_private *dev_priv, |
2533,7 → 2388,7 |
} |
static int |
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj) |
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj) |
{ |
if (obj->last_fenced_seqno) { |
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno); |
2543,6 → 2398,12 |
obj->last_fenced_seqno = 0; |
} |
/* Ensure that all CPU reads are completed before installing a fence |
* and all writes before removing the fence. |
*/ |
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT) |
mb(); |
obj->fenced_gpu_access = false; |
return 0; |
} |
2553,7 → 2414,7 |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
int ret; |
ret = i915_gem_object_wait_fence(obj); |
ret = i915_gem_object_flush_fence(obj); |
if (ret) |
return ret; |
2627,7 → 2488,7 |
* will need to serialise the write to the associated fence register? |
*/ |
if (obj->fence_dirty) { |
ret = i915_gem_object_wait_fence(obj); |
ret = i915_gem_object_flush_fence(obj); |
if (ret) |
return ret; |
} |
2648,7 → 2509,7 |
if (reg->obj) { |
struct drm_i915_gem_object *old = reg->obj; |
ret = i915_gem_object_wait_fence(old); |
ret = i915_gem_object_flush_fence(old); |
if (ret) |
return ret; |
2671,7 → 2532,7 |
/* On non-LLC machines we have to be careful when putting differing |
* types of snoopable memory together to avoid the prefetcher |
* crossing memory domains and dying. |
* crossing memory domains and dieing. |
*/ |
if (HAS_LLC(dev)) |
return true; |
2749,16 → 2610,21 |
bool mappable, fenceable; |
int ret; |
if (obj->madv != I915_MADV_WILLNEED) { |
DRM_ERROR("Attempting to bind a purgeable object\n"); |
return -EINVAL; |
} |
fence_size = i915_gem_get_gtt_size(dev, |
obj->base.size, |
obj->tiling_mode); |
fence_alignment = i915_gem_get_gtt_alignment(dev, |
obj->base.size, |
obj->tiling_mode, true); |
obj->tiling_mode); |
unfenced_alignment = |
i915_gem_get_gtt_alignment(dev, |
i915_gem_get_unfenced_gtt_alignment(dev, |
obj->base.size, |
obj->tiling_mode, false); |
obj->tiling_mode); |
if (alignment == 0) |
alignment = map_and_fenceable ? fence_alignment : |
2774,7 → 2640,7 |
* before evicting everything in a vain attempt to find space. |
*/ |
if (obj->base.size > |
(map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) { |
(map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) { |
DRM_ERROR("Attempting to bind an object larger than the aperture\n"); |
return -E2BIG; |
} |
2795,7 → 2661,7 |
if (map_and_fenceable) |
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node, |
size, alignment, obj->cache_level, |
0, dev_priv->gtt.mappable_end); |
0, dev_priv->mm.gtt_mappable_end); |
else |
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node, |
size, alignment, obj->cache_level); |
2829,7 → 2695,7 |
(node->start & (fence_alignment - 1)) == 0; |
mappable = |
obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end; |
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end; |
obj->map_and_fenceable = mappable && fenceable; |
2849,13 → 2715,6 |
if (obj->pages == NULL) |
return; |
/* |
* Stolen memory is always coherent with the GPU as it is explicitly |
* marked as wc by the system, or the system is cache-coherent. |
*/ |
if (obj->stolen) |
return; |
/* If the GPU is snooping the contents of the CPU cache, |
* we do not need to manually clear the CPU cache lines. However, |
* the caches are only snooped when the render cache is |
2989,13 → 2848,6 |
i915_gem_object_flush_cpu_write_domain(obj); |
/* Serialise direct access to this object with the barriers for |
* coherent writes from the GPU, by effectively invalidating the |
* GTT domain upon first access. |
*/ |
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) |
mb(); |
old_write_domain = obj->base.write_domain; |
old_read_domains = obj->base.read_domains; |
3103,12 → 2955,6 |
struct drm_i915_gem_object *obj; |
int ret; |
if(args->handle == -2) |
{ |
printf("%s handle %d\n", __FUNCTION__, args->handle); |
return 0; |
} |
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
return ret; |
3135,12 → 2981,6 |
enum i915_cache_level level; |
int ret; |
if(args->handle == -2) |
{ |
printf("%s handle %d\n", __FUNCTION__, args->handle); |
return 0; |
} |
switch (args->caching) { |
case I915_CACHING_NONE: |
level = I915_CACHE_NONE; |
3314,18 → 3154,12 |
unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20); |
struct drm_i915_gem_request *request; |
struct intel_ring_buffer *ring = NULL; |
unsigned reset_counter; |
u32 seqno = 0; |
int ret; |
ret = i915_gem_wait_for_error(&dev_priv->gpu_error); |
if (ret) |
return ret; |
if (atomic_read(&dev_priv->mm.wedged)) |
return -EIO; |
ret = i915_gem_check_wedge(&dev_priv->gpu_error, false); |
if (ret) |
return ret; |
spin_lock(&file_priv->mm.lock); |
list_for_each_entry(request, &file_priv->mm.request_list, client_list) { |
if (time_after_eq(request->emitted_jiffies, recent_enough)) |
3334,13 → 3168,12 |
ring = request->ring; |
seqno = request->seqno; |
} |
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
spin_unlock(&file_priv->mm.lock); |
if (seqno == 0) |
return 0; |
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL); |
ret = __wait_seqno(ring, seqno, true, NULL); |
if (ret == 0) |
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0); |
3414,12 → 3247,6 |
struct drm_i915_gem_object *obj; |
int ret; |
if(args->handle == -2) |
{ |
printf("%s handle %d\n", __FUNCTION__, args->handle); |
return 0; |
} |
ret = i915_mutex_lock_interruptible(dev); |
if (ret) |
return ret; |
3517,12 → 3344,6 |
if (ret) |
return ret; |
if(args->handle == -2) |
{ |
obj = get_fb_obj(); |
drm_gem_object_reference(&obj->base); |
} |
else |
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle)); |
if (&obj->base == NULL) { |
ret = -ENOENT; |
3633,7 → 3454,7 |
{ |
struct drm_i915_gem_object *obj; |
struct address_space *mapping; |
gfp_t mask; |
u32 mask; |
obj = kzalloc(sizeof(*obj), GFP_KERNEL); |
if (obj == NULL) |
3744,10 → 3565,6 |
} |
i915_gem_retire_requests(dev); |
/* Under UMS, be paranoid and evict. */ |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
i915_gem_evict_everything(dev); |
i915_gem_reset_fences(dev); |
/* Hack! Don't let anybody do execbuf while we don't control the chip. |
3755,7 → 3572,7 |
* And not confound mm.suspended! |
*/ |
dev_priv->mm.suspended = 1; |
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer); |
del_timer_sync(&dev_priv->hangcheck_timer); |
i915_kernel_lost_context(dev); |
i915_gem_cleanup_ringbuffer(dev); |
3775,7 → 3592,7 |
u32 misccpctl; |
int i; |
if (!HAS_L3_GPU_CACHE(dev)) |
if (!IS_IVYBRIDGE(dev)) |
return; |
if (!dev_priv->l3_parity.remap_info) |
3818,10 → 3635,8 |
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL); |
if (IS_GEN6(dev)) |
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB)); |
else if (IS_GEN7(dev)) |
else |
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB)); |
else |
BUG(); |
} |
static bool |
3840,11 → 3655,22 |
return true; |
} |
static int i915_gem_init_rings(struct drm_device *dev) |
int |
i915_gem_init_hw(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
drm_i915_private_t *dev_priv = dev->dev_private; |
int ret; |
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
return -EIO; |
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) |
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); |
i915_gem_l3_remap(dev); |
i915_gem_init_swizzling(dev); |
ret = intel_init_render_ring_buffer(dev); |
if (ret) |
return ret; |
3861,50 → 3687,37 |
goto cleanup_bsd_ring; |
} |
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000)); |
if (ret) |
goto cleanup_blt_ring; |
dev_priv->next_seqno = 1; |
/* |
* XXX: There was some w/a described somewhere suggesting loading |
* contexts before PPGTT. |
*/ |
i915_gem_context_init(dev); |
i915_gem_init_ppgtt(dev); |
return 0; |
cleanup_blt_ring: |
intel_cleanup_ring_buffer(&dev_priv->ring[BCS]); |
cleanup_bsd_ring: |
intel_cleanup_ring_buffer(&dev_priv->ring[VCS]); |
cleanup_render_ring: |
intel_cleanup_ring_buffer(&dev_priv->ring[RCS]); |
return ret; |
} |
int |
i915_gem_init_hw(struct drm_device *dev) |
static bool |
intel_enable_ppgtt(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
int ret; |
if (i915_enable_ppgtt >= 0) |
return i915_enable_ppgtt; |
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) |
return -EIO; |
#ifdef CONFIG_INTEL_IOMMU |
/* Disable ppgtt on SNB if VT-d is on. */ |
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) |
return false; |
#endif |
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1)) |
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000); |
i915_gem_l3_remap(dev); |
i915_gem_init_swizzling(dev); |
ret = i915_gem_init_rings(dev); |
if (ret) |
return ret; |
/* |
* XXX: There was some w/a described somewhere suggesting loading |
* contexts before PPGTT. |
*/ |
i915_gem_context_init(dev); |
i915_gem_init_ppgtt(dev); |
return 0; |
return true; |
} |
#define LFB_SIZE 0xC00000 |
3912,10 → 3725,39 |
int i915_gem_init(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
unsigned long gtt_size, mappable_size; |
int ret; |
gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT; |
mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
mutex_lock(&dev->struct_mutex); |
i915_gem_init_global_gtt(dev); |
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { |
/* PPGTT pdes are stolen from global gtt ptes, so shrink the |
* aperture accordingly when using aliasing ppgtt. */ |
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; |
i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE); |
ret = i915_gem_init_aliasing_ppgtt(dev); |
if (ret) { |
mutex_unlock(&dev->struct_mutex); |
return ret; |
} |
} else { |
/* Let GEM manage all of the aperture. |
* |
* However, leave one page at the end still bound to the scratch |
* page. There are a number of places where the hardware |
* apparently prefetches past the end of the object, and we've |
* seen multiple hangs with the GPU head pointer stuck in a |
* batchbuffer bound at the last page of the aperture. One page |
* should be enough to keep any prefetching inside of the |
* aperture. |
*/ |
i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE); |
} |
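/* Hypothetical sketch of the guard-page idea above (names as used |
 * in this function; purely illustrative): |
 * |
 *	gem_end = gtt_size - PAGE_SIZE;	// last page stays on scratch |
 * |
 * GEM then allocates only inside [LFB_SIZE, gem_end), so GPU |
 * prefetch past the final object lands on the scratch page instead |
 * of an unmapped address. */ |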
ret = i915_gem_init_hw(dev); |
mutex_unlock(&dev->struct_mutex); |
if (ret) { |
3949,9 → 3791,9 |
if (drm_core_check_feature(dev, DRIVER_MODESET)) |
return 0; |
if (i915_reset_in_progress(&dev_priv->gpu_error)) { |
if (atomic_read(&dev_priv->mm.wedged)) { |
DRM_ERROR("Reenabling wedged hardware, good luck\n"); |
atomic_set(&dev_priv->gpu_error.reset_counter, 0); |
atomic_set(&dev_priv->mm.wedged, 0); |
} |
mutex_lock(&dev->struct_mutex); |
4016,8 → 3858,8 |
void |
i915_gem_load(struct drm_device *dev) |
{ |
int i; |
drm_i915_private_t *dev_priv = dev->dev_private; |
int i; |
INIT_LIST_HEAD(&dev_priv->mm.active_list); |
INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
4030,7 → 3872,6 |
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); |
INIT_DELAYED_WORK(&dev_priv->mm.retire_work, |
i915_gem_retire_work_handler); |
init_waitqueue_head(&dev_priv->gpu_error.reset_queue); |
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */ |
if (IS_GEN3(dev)) { |
/drivers/video/drm/i915/i915_irq.c |
---|
45,8 → 45,33 |
#define MAX_NOPID ((u32)~0) |
/** |
* Interrupts that are always left unmasked. |
* |
* Since pipe events are edge-triggered from the PIPESTAT register to IIR, |
* we leave them always unmasked in IMR and then control enabling them through |
* PIPESTAT alone. |
*/ |
#define I915_INTERRUPT_ENABLE_FIX \ |
(I915_ASLE_INTERRUPT | \ |
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \ |
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \ |
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \ |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
/** Interrupts that we mask and unmask at runtime. */ |
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT) |
#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\ |
PIPE_VBLANK_INTERRUPT_STATUS) |
#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\ |
PIPE_VBLANK_INTERRUPT_ENABLE) |
#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \ |
DRM_I915_VBLANK_PIPE_B) |
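A minimal sketch of the scheme described above (hypothetical function name; the helper and register names are the ones used elsewhere in this file): the fixed bits stay permanently unmasked in IMR, and a given pipe event is then gated through PIPESTAT alone. |
/* Sketch: IMR keeps the fixed bits unmasked forever; per-event |
 * control (here: vblank on pipe 0) happens only through PIPESTAT. */ |
static void example_enable_pipe0_vblank(drm_i915_private_t *dev_priv) |
{ |
	dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX; |
	I915_WRITE(IMR, dev_priv->irq_mask); |
	i915_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); |
} |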
/* For display hotplug interrupt */ |
static void |
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
190,34 → 215,7 |
return I915_READ(reg); |
} |
/* |
* Handle hotplug events outside the interrupt handler proper. |
*/ |
static void i915_hotplug_work_func(struct work_struct *work) |
{ |
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
hotplug_work); |
struct drm_device *dev = dev_priv->dev; |
struct drm_mode_config *mode_config = &dev->mode_config; |
struct intel_encoder *encoder; |
/* HPD irq before everything is fully set up. */ |
if (!dev_priv->enable_hotplug_processing) |
return; |
mutex_lock(&mode_config->mutex); |
DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
if (encoder->hot_plug) |
encoder->hot_plug(encoder); |
mutex_unlock(&mode_config->mutex); |
/* Just fire off a uevent and let userspace tell us what to do */ |
drm_helper_hpd_irq_event(dev); |
} |
static void notify_ring(struct drm_device *dev, |
struct intel_ring_buffer *ring) |
{ |
404,20 → 402,6 |
// queue_work(dev_priv->wq, &dev_priv->rps.work); |
} |
static void gmbus_irq_handler(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
wake_up_all(&dev_priv->gmbus_wait_queue); |
} |
static void dp_aux_irq_handler(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
wake_up_all(&dev_priv->gmbus_wait_queue); |
} |
static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
{ |
struct drm_device *dev = (struct drm_device *) arg; |
427,6 → 411,7 |
unsigned long irqflags; |
int pipe; |
u32 pipe_stats[I915_MAX_PIPES]; |
bool blc_event; |
atomic_inc(&dev_priv->irq_received); |
477,19 → 462,19 |
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
hotplug_status); |
if (hotplug_status & dev_priv->hotplug_supported_mask) |
queue_work(dev_priv->wq, |
&dev_priv->hotplug_work); |
// if (hotplug_status & dev_priv->hotplug_supported_mask) |
// queue_work(dev_priv->wq, |
// &dev_priv->hotplug_work); |
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
I915_READ(PORT_HOTPLUG_STAT); |
} |
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
gmbus_irq_handler(dev); |
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
blc_event = true; |
// if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
// gen6_queue_rps_work(dev_priv, pm_iir); |
if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
gen6_queue_rps_work(dev_priv, pm_iir); |
I915_WRITE(GTIIR, gt_iir); |
I915_WRITE(GEN6_PMIIR, pm_iir); |
505,8 → 490,7 |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
int pipe; |
if (pch_iir & SDE_HOTPLUG_MASK) |
queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
printf("%s\n", __FUNCTION__); |
if (pch_iir & SDE_AUDIO_POWER_MASK) |
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
513,11 → 497,8 |
(pch_iir & SDE_AUDIO_POWER_MASK) >> |
SDE_AUDIO_POWER_SHIFT); |
if (pch_iir & SDE_AUX_MASK) |
dp_aux_irq_handler(dev); |
if (pch_iir & SDE_GMBUS) |
gmbus_irq_handler(dev); |
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); |
if (pch_iir & SDE_AUDIO_HDCP_MASK) |
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); |
551,9 → 532,6 |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
int pipe; |
if (pch_iir & SDE_HOTPLUG_MASK_CPT) |
queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) |
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> |
560,10 → 538,10 |
SDE_AUDIO_POWER_SHIFT_CPT); |
if (pch_iir & SDE_AUX_MASK_CPT) |
dp_aux_irq_handler(dev); |
DRM_DEBUG_DRIVER("AUX channel interrupt\n"); |
if (pch_iir & SDE_GMBUS_CPT) |
gmbus_irq_handler(dev); |
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n"); |
if (pch_iir & SDE_AUDIO_CP_REQ_CPT) |
DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); |
582,7 → 560,7 |
{ |
struct drm_device *dev = (struct drm_device *) arg; |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; |
u32 de_iir, gt_iir, de_ier, pm_iir; |
irqreturn_t ret = IRQ_NONE; |
int i; |
592,15 → 570,6 |
de_ier = I915_READ(DEIER); |
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
/* Disable south interrupts. We'll only write to SDEIIR once, so further |
* interrupts will be stored on its back queue, and then we'll be |
* able to process them after we restore SDEIER (as soon as we restore |
* it, we'll get an interrupt if SDEIIR still has something to process |
* due to its back queue). */ |
sde_ier = I915_READ(SDEIER); |
I915_WRITE(SDEIER, 0); |
POSTING_READ(SDEIER); |
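/* Condensed, hypothetical sketch of the read-once pattern that the |
 * comment above describes (illustrative only): |
 * |
 *	sde_ier = I915_READ(SDEIER); |
 *	I915_WRITE(SDEIER, 0); |
 *	POSTING_READ(SDEIER); |
 *	... read SDEIIR once, handle the latched bits ... |
 *	I915_WRITE(SDEIER, sde_ier); |
 *	POSTING_READ(SDEIER); |
 * |
 * While SDEIER is zero, new PCH interrupts queue up behind SDEIIR; |
 * restoring SDEIER re-raises the interrupt if anything is pending. */ |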
gt_iir = I915_READ(GTIIR); |
if (gt_iir) { |
snb_gt_irq_handler(dev, dev_priv, gt_iir); |
610,8 → 579,6 |
de_iir = I915_READ(DEIIR); |
if (de_iir) { |
if (de_iir & DE_AUX_CHANNEL_A_IVB) |
dp_aux_irq_handler(dev); |
#if 0 |
if (de_iir & DE_GSE_IVB) |
intel_opregion_gse_intr(dev); |
629,6 → 596,8 |
if (de_iir & DE_PCH_EVENT_IVB) { |
u32 pch_iir = I915_READ(SDEIIR); |
// if (pch_iir & SDE_HOTPLUG_MASK_CPT) |
// queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
cpt_irq_handler(dev, pch_iir); |
/* clear PCH hotplug event before clearing CPU irq */ |
649,8 → 618,6 |
I915_WRITE(DEIER, de_ier); |
POSTING_READ(DEIER); |
I915_WRITE(SDEIER, sde_ier); |
POSTING_READ(SDEIER); |
return ret; |
} |
670,7 → 637,7 |
struct drm_device *dev = (struct drm_device *) arg; |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
int ret = IRQ_NONE; |
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; |
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir; |
atomic_inc(&dev_priv->irq_received); |
679,20 → 646,13 |
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
POSTING_READ(DEIER); |
/* Disable south interrupts. We'll only write to SDEIIR once, so further |
* interrupts will be stored on its back queue, and then we'll be |
* able to process them after we restore SDEIER (as soon as we restore |
* it, we'll get an interrupt if SDEIIR still has something to process |
* due to its back queue). */ |
sde_ier = I915_READ(SDEIER); |
I915_WRITE(SDEIER, 0); |
POSTING_READ(SDEIER); |
de_iir = I915_READ(DEIIR); |
gt_iir = I915_READ(GTIIR); |
pch_iir = I915_READ(SDEIIR); |
pm_iir = I915_READ(GEN6_PMIIR); |
if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) |
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 && |
(!IS_GEN6(dev) || pm_iir == 0)) |
goto done; |
ret = IRQ_HANDLED; |
701,10 → 661,6 |
ilk_gt_irq_handler(dev, dev_priv, gt_iir); |
else |
snb_gt_irq_handler(dev, dev_priv, gt_iir); |
if (de_iir & DE_AUX_CHANNEL_A) |
dp_aux_irq_handler(dev); |
#if 0 |
if (de_iir & DE_GSE) |
intel_opregion_gse_intr(dev); |
728,15 → 684,12 |
/* check event from PCH */ |
if (de_iir & DE_PCH_EVENT) { |
u32 pch_iir = I915_READ(SDEIIR); |
// if (pch_iir & hotplug_mask) |
// queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
if (HAS_PCH_CPT(dev)) |
cpt_irq_handler(dev, pch_iir); |
else |
ibx_irq_handler(dev, pch_iir); |
/* should clear PCH hotplug event before clearing CPU irq */ |
I915_WRITE(SDEIIR, pch_iir); |
} |
#if 0 |
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) |
745,6 → 698,8 |
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) |
gen6_queue_rps_work(dev_priv, pm_iir); |
#endif |
/* should clear PCH hotplug event before clearing CPU irq */ |
I915_WRITE(SDEIIR, pch_iir); |
I915_WRITE(GTIIR, gt_iir); |
I915_WRITE(DEIIR, de_iir); |
I915_WRITE(GEN6_PMIIR, pm_iir); |
752,8 → 707,6 |
done: |
I915_WRITE(DEIER, de_ier); |
POSTING_READ(DEIER); |
I915_WRITE(SDEIER, sde_ier); |
POSTING_READ(SDEIER); |
return ret; |
} |
780,7 → 733,7 |
instdone[1] = I915_READ(INSTDONE1); |
break; |
default: |
WARN_ONCE(1, "Unsupported platform\n"); |
WARN(1, "Unsupported platform\n"); |
case 7: |
instdone[0] = I915_READ(GEN7_INSTDONE_1); |
instdone[1] = I915_READ(GEN7_SC_INSTDONE); |
818,7 → 771,7 |
goto unwind; |
local_irq_save(flags); |
if (reloc_offset < dev_priv->gtt.mappable_end && |
if (reloc_offset < dev_priv->mm.gtt_mappable_end && |
src->has_global_gtt_mapping) { |
void __iomem *s; |
827,18 → 780,10 |
* captures what the GPU read. |
*/ |
s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, |
reloc_offset); |
memcpy_fromio(d, s, PAGE_SIZE); |
io_mapping_unmap_atomic(s); |
} else if (src->stolen) { |
unsigned long offset; |
offset = dev_priv->mm.stolen_base; |
offset += src->stolen->start; |
offset += i << PAGE_SHIFT; |
memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); |
} else { |
struct page *page; |
void *s; |
985,8 → 930,6 |
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); |
break; |
default: |
BUG(); |
} |
} |
1000,18 → 943,6 |
if (!ring->get_seqno) |
return NULL; |
if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { |
u32 acthd = I915_READ(ACTHD); |
if (WARN_ON(ring->id != RCS)) |
return NULL; |
obj = ring->private; |
if (acthd >= obj->gtt_offset && |
acthd < obj->gtt_offset + obj->base.size) |
return i915_error_object_create(dev_priv, obj); |
} |
seqno = ring->get_seqno(ring, false); |
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
if (obj->ring != ring) |
1135,9 → 1066,9 |
unsigned long flags; |
int i, pipe; |
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
error = dev_priv->gpu_error.first_error; |
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
spin_lock_irqsave(&dev_priv->error_lock, flags); |
error = dev_priv->first_error; |
spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
if (error) |
return; |
1148,8 → 1079,7 |
return; |
} |
DRM_INFO("capturing error event; look for more information in" |
"/sys/kernel/debug/dri/%d/i915_error_state\n", |
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", |
dev->primary->index); |
kref_init(&error->ref); |
1232,12 → 1162,12 |
error->overlay = intel_overlay_capture_error_state(dev); |
error->display = intel_display_capture_error_state(dev); |
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
if (dev_priv->gpu_error.first_error == NULL) { |
dev_priv->gpu_error.first_error = error; |
spin_lock_irqsave(&dev_priv->error_lock, flags); |
if (dev_priv->first_error == NULL) { |
dev_priv->first_error = error; |
error = NULL; |
} |
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
if (error) |
i915_error_state_free(&error->ref); |
1249,10 → 1179,10 |
struct drm_i915_error_state *error; |
unsigned long flags; |
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
error = dev_priv->gpu_error.first_error; |
dev_priv->gpu_error.first_error = NULL; |
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
spin_lock_irqsave(&dev_priv->error_lock, flags); |
error = dev_priv->first_error; |
dev_priv->first_error = NULL; |
spin_unlock_irqrestore(&dev_priv->error_lock, flags); |
if (error) |
kref_put(&error->ref, i915_error_state_free); |
1373,12 → 1303,11 |
i915_report_and_clear_eir(dev); |
if (wedged) { |
atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, |
&dev_priv->gpu_error.reset_counter); |
// INIT_COMPLETION(dev_priv->error_completion); |
atomic_set(&dev_priv->mm.wedged, 1); |
/* |
* Wake up waiting processes so that the reset work item |
* doesn't deadlock trying to grab various locks. |
* Wake up waiting processes so they don't hang |
*/ |
for_each_ring(ring, dev_priv, i) |
wake_up_all(&ring->irq_queue); |
1650,7 → 1579,7 |
* This register is the same on all known PCH chips. |
*/ |
static void ibx_enable_hotplug(struct drm_device *dev) |
static void ironlake_enable_pch_hotplug(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
u32 hotplug; |
1663,36 → 1592,14 |
I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
} |
static void ibx_irq_postinstall(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
u32 mask; |
if (HAS_PCH_IBX(dev)) |
mask = SDE_HOTPLUG_MASK | |
SDE_GMBUS | |
SDE_AUX_MASK; |
else |
mask = SDE_HOTPLUG_MASK_CPT | |
SDE_GMBUS_CPT | |
SDE_AUX_MASK_CPT; |
I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
I915_WRITE(SDEIMR, ~mask); |
I915_WRITE(SDEIER, mask); |
POSTING_READ(SDEIER); |
ibx_enable_hotplug(dev); |
} |
static int ironlake_irq_postinstall(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
/* enable kind of interrupts always enabled */ |
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
DE_AUX_CHANNEL_A; |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
u32 render_irqs; |
u32 hotplug_mask; |
dev_priv->irq_mask = ~display_mask; |
1720,13 → 1627,33 |
I915_WRITE(GTIER, render_irqs); |
POSTING_READ(GTIER); |
ibx_irq_postinstall(dev); |
if (HAS_PCH_CPT(dev)) { |
hotplug_mask = (SDE_CRT_HOTPLUG_CPT | |
SDE_PORTB_HOTPLUG_CPT | |
SDE_PORTC_HOTPLUG_CPT | |
SDE_PORTD_HOTPLUG_CPT); |
} else { |
hotplug_mask = (SDE_CRT_HOTPLUG | |
SDE_PORTB_HOTPLUG | |
SDE_PORTC_HOTPLUG | |
SDE_PORTD_HOTPLUG | |
SDE_AUX_MASK); |
} |
dev_priv->pch_irq_mask = ~hotplug_mask; |
I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); |
I915_WRITE(SDEIER, hotplug_mask); |
POSTING_READ(SDEIER); |
// ironlake_enable_pch_hotplug(dev); |
if (IS_IRONLAKE_M(dev)) { |
/* Clear & enable PCU event interrupts */ |
I915_WRITE(DEIIR, DE_PCU_EVENT); |
I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); |
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
// ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
} |
return 0; |
1740,9 → 1667,9 |
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | |
DE_PLANEC_FLIP_DONE_IVB | |
DE_PLANEB_FLIP_DONE_IVB | |
DE_PLANEA_FLIP_DONE_IVB | |
DE_AUX_CHANNEL_A_IVB; |
DE_PLANEA_FLIP_DONE_IVB; |
u32 render_irqs; |
u32 hotplug_mask; |
dev_priv->irq_mask = ~display_mask; |
1766,8 → 1693,19 |
I915_WRITE(GTIER, render_irqs); |
POSTING_READ(GTIER); |
ibx_irq_postinstall(dev); |
hotplug_mask = (SDE_CRT_HOTPLUG_CPT | |
SDE_PORTB_HOTPLUG_CPT | |
SDE_PORTC_HOTPLUG_CPT | |
SDE_PORTD_HOTPLUG_CPT); |
dev_priv->pch_irq_mask = ~hotplug_mask; |
I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask); |
I915_WRITE(SDEIER, hotplug_mask); |
POSTING_READ(SDEIER); |
// ironlake_enable_pch_hotplug(dev); |
return 0; |
} |
1775,6 → 1713,7 |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
u32 enable_mask; |
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
u32 render_irqs; |
u16 msid; |
1803,9 → 1742,6 |
// msid |= (1<<14); |
// pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); |
I915_WRITE(PORT_HOTPLUG_EN, 0); |
POSTING_READ(PORT_HOTPLUG_EN); |
I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
I915_WRITE(VLV_IER, enable_mask); |
I915_WRITE(VLV_IIR, 0xffffffff); |
1814,7 → 1750,6 |
POSTING_READ(VLV_IER); |
i915_enable_pipestat(dev_priv, 0, pipestat_enable); |
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
i915_enable_pipestat(dev_priv, 1, pipestat_enable); |
I915_WRITE(VLV_IIR, 0xffffffff); |
1835,22 → 1770,14 |
#endif |
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); |
return 0; |
} |
static void valleyview_hpd_irq_setup(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
#if 0 /* FIXME: check register definitions; some have moved */ |
/* Note HDMI and DP share bits */ |
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTB_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTD_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMIB_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMIC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMID_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) |
hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) |
1859,8 → 1786,11 |
hotplug_en |= CRT_HOTPLUG_INT_EN; |
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
} |
#endif |
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
return 0; |
} |
static void valleyview_irq_uninstall(struct drm_device *dev) |
2092,40 → 2022,28 |
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | |
I915_USER_INTERRUPT; |
#if 0 |
if (I915_HAS_HOTPLUG(dev)) { |
I915_WRITE(PORT_HOTPLUG_EN, 0); |
POSTING_READ(PORT_HOTPLUG_EN); |
/* Enable in IER... */ |
enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
/* and unmask in IMR */ |
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; |
} |
#endif |
I915_WRITE(IMR, dev_priv->irq_mask); |
I915_WRITE(IER, enable_mask); |
POSTING_READ(IER); |
// intel_opregion_enable_asle(dev); |
return 0; |
} |
static void i915_hpd_irq_setup(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
u32 hotplug_en; |
if (I915_HAS_HOTPLUG(dev)) { |
hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTB_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTD_HOTPLUG_INT_EN; |
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
#if 0 |
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMIB_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMIC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMID_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) |
hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) |
2134,11 → 2052,15 |
hotplug_en |= CRT_HOTPLUG_INT_EN; |
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
} |
#endif |
/* Ignore TV since it's buggy */ |
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
} |
// intel_opregion_enable_asle(dev); |
return 0; |
} |
static irqreturn_t i915_irq_handler(int irq, void *arg) |
2197,9 → 2119,9 |
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
hotplug_status); |
if (hotplug_status & dev_priv->hotplug_supported_mask) |
queue_work(dev_priv->wq, |
&dev_priv->hotplug_work); |
// if (hotplug_status & dev_priv->hotplug_supported_mask) |
// queue_work(dev_priv->wq, |
// &dev_priv->hotplug_work); |
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
POSTING_READ(PORT_HOTPLUG_STAT); |
2298,6 → 2220,7 |
static int i965_irq_postinstall(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
u32 hotplug_en; |
u32 enable_mask; |
u32 error_mask; |
2318,7 → 2241,6 |
dev_priv->pipestat[0] = 0; |
dev_priv->pipestat[1] = 0; |
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
/* |
* Enable some error detection, note the instruction error mask |
2339,27 → 2261,15 |
I915_WRITE(IER, enable_mask); |
POSTING_READ(IER); |
I915_WRITE(PORT_HOTPLUG_EN, 0); |
POSTING_READ(PORT_HOTPLUG_EN); |
// intel_opregion_enable_asle(dev); |
return 0; |
} |
static void i965_hpd_irq_setup(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
u32 hotplug_en; |
/* Note HDMI and DP share hotplug bits */ |
hotplug_en = 0; |
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTB_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS) |
hotplug_en |= PORTD_HOTPLUG_INT_EN; |
#if 0 |
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMIB_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMIC_HOTPLUG_INT_EN; |
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) |
hotplug_en |= HDMID_HOTPLUG_INT_EN; |
if (IS_G4X(dev)) { |
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) |
hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
2382,10 → 2292,14 |
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; |
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
} |
#endif |
/* Ignore TV since it's buggy */ |
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
// intel_opregion_enable_asle(dev); |
return 0; |
} |
static irqreturn_t i965_irq_handler(int irq, void *arg) |
2444,9 → 2358,9 |
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
hotplug_status); |
if (hotplug_status & dev_priv->hotplug_supported_mask) |
queue_work(dev_priv->wq, |
&dev_priv->hotplug_work); |
// if (hotplug_status & dev_priv->hotplug_supported_mask) |
// queue_work(dev_priv->wq, |
// &dev_priv->hotplug_work); |
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
I915_READ(PORT_HOTPLUG_STAT); |
2481,9 → 2395,6 |
// if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
// intel_opregion_asle_intr(dev); |
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
gmbus_irq_handler(dev); |
/* With MSI, interrupts are only generated when iir |
* transitions from zero to nonzero. If another bit got |
* set while we were handling the existing iir bits, then |
2534,22 → 2445,20 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
// pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
if (IS_VALLEYVIEW(dev)) { |
dev->driver->irq_handler = valleyview_irq_handler; |
dev->driver->irq_preinstall = valleyview_irq_preinstall; |
dev->driver->irq_postinstall = valleyview_irq_postinstall; |
dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup; |
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
} else if (IS_IVYBRIDGE(dev)) { |
/* Share pre & uninstall handlers with ILK/SNB */ |
dev->driver->irq_handler = ivybridge_irq_handler; |
dev->driver->irq_preinstall = ironlake_irq_preinstall; |
dev->driver->irq_postinstall = ivybridge_irq_postinstall; |
} else if (IS_HASWELL(dev)) { |
/* Share interrupt handling with IVB */ |
dev->driver->irq_handler = ivybridge_irq_handler; |
dev->driver->irq_preinstall = ironlake_irq_preinstall; |
dev->driver->irq_postinstall = ivybridge_irq_postinstall; |
} else if (HAS_PCH_SPLIT(dev)) { |
dev->driver->irq_handler = ironlake_irq_handler; |
dev->driver->irq_preinstall = ironlake_irq_preinstall; |
2560,25 → 2469,16 |
dev->driver->irq_preinstall = i915_irq_preinstall; |
dev->driver->irq_postinstall = i915_irq_postinstall; |
dev->driver->irq_handler = i915_irq_handler; |
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
} else { |
dev->driver->irq_preinstall = i965_irq_preinstall; |
dev->driver->irq_postinstall = i965_irq_postinstall; |
dev->driver->irq_handler = i965_irq_handler; |
dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup; |
} |
} |
} |
void intel_hpd_init(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (dev_priv->display.hpd_irq_setup) |
dev_priv->display.hpd_irq_setup(dev); |
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ; |
} |
irqreturn_t intel_irq_handler(struct drm_device *dev) |
{ |
/drivers/video/drm/i915/kms_display.c |
---|
480,7 → 480,7 |
/* You don't need to worry about fragmentation issues. |
* GTT space is contiguous. I guarantee it. */ |
mapped = bits = (u32*)MapIoMem(dev_priv->gtt.mappable_base + obj->gtt_offset, |
mapped = bits = (u32*)MapIoMem(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, |
CURSOR_WIDTH*CURSOR_HEIGHT*4, PG_SW); |
if (unlikely(bits == NULL)) |
681,12 → 681,6 |
u32 slot; |
int ret; |
if(mask->handle == -2) |
{ |
printf("%s handle %d\n", __FUNCTION__, mask->handle); |
return 0; |
} |
obj = drm_gem_object_lookup(dev, file, mask->handle); |
if (obj == NULL) |
return -ENOENT; |
889,12 → 883,6 |
return 1; |
}; |
bool queue_work(struct workqueue_struct *wq, struct work_struct *work) |
{ |
return __queue_work(wq, work); |
} |
void __stdcall delayed_work_timer_fn(unsigned long __data) |
{ |
struct delayed_work *dwork = (struct delayed_work *)__data; |
974,61 → 962,4 |
} |
void |
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) |
{ |
unsigned long flags; |
// wait->flags &= ~WQ_FLAG_EXCLUSIVE; |
spin_lock_irqsave(&q->lock, flags); |
if (list_empty(&wait->task_list)) |
__add_wait_queue(q, wait); |
spin_unlock_irqrestore(&q->lock, flags); |
} |
/** |
* finish_wait - clean up after waiting in a queue |
* @q: waitqueue waited on |
* @wait: wait descriptor |
* |
* Sets current thread back to running state and removes |
* the wait descriptor from the given waitqueue if still |
* queued. |
*/ |
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait) |
{ |
unsigned long flags; |
// __set_current_state(TASK_RUNNING); |
/* |
* We can check for list emptiness outside the lock |
* IFF: |
* - we use the "careful" check that verifies both |
* the next and prev pointers, so that there cannot |
* be any half-pending updates in progress on other |
* CPUs that we haven't seen yet (and that might |
* still change the stack area). |
* and |
* - all other users take the lock (ie we can only |
* have _one_ other CPU that looks at or modifies |
* the list). |
*/ |
if (!list_empty_careful(&wait->task_list)) { |
spin_lock_irqsave(&q->lock, flags); |
list_del_init(&wait->task_list); |
spin_unlock_irqrestore(&q->lock, flags); |
} |
DestroyEvent(wait->evnt); |
} |
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key) |
{ |
list_del_init(&wait->task_list); |
return 1; |
} |
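Taken together, the helpers above compose into the classic wait-loop pattern. A hypothetical sketch ('q', 'wait' and 'condition' are assumed to exist; wait-descriptor setup, including its event handle, is port-specific and omitted): |
for (;;) { |
	prepare_to_wait(&q, &wait, 0); /* state is unused above */ |
	if (condition) |
		break; |
	/* block on wait.evnt here (port-specific) */ |
} |
finish_wait(&q, &wait); |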
/drivers/video/drm/i915/main.c |
---|
57,7 → 57,7 |
int i915_modeset = 1; |
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline) |
u32_t drvEntry(int action, char *cmdline) |
{ |
int err = 0; |
82,10 → 82,10 |
return 0; |
}; |
} |
dbgprintf(" i915 v3.9-rc8\n cmdline: %s\n", cmdline); |
dbgprintf("i915 RC 10.5\n cmdline: %s\n", cmdline); |
cpu_detect(); |
// dbgprintf("\ncache line size %d\n", x86_clflush_size); |
dbgprintf("\ncache line size %d\n", x86_clflush_size); |
enum_pci_devices(); |
105,14 → 105,6 |
return err; |
}; |
//int __declspec(dllexport) DllMain(int, char*) __attribute__ ((weak, alias ("drvEntry"))); |
//int __declspec(dllexport) DllMain( int hinstDLL, int fdwReason, void *lpReserved ) |
//{ |
// |
// return 1; |
//} |
#define CURRENT_API 0x0200 /* 2.00 */ |
#define COMPATIBLE_API 0x0100 /* 1.00 */ |
146,7 → 138,7 |
#define SRV_I915_GEM_BUSY 28 |
#define SRV_I915_GEM_SET_DOMAIN 29 |
#define SRV_I915_GEM_MMAP 30 |
#define SRV_I915_GEM_MMAP_GTT 31 |
#define SRV_I915_GEM_THROTTLE 32 |
#define SRV_FBINFO 33 |
#define SRV_I915_GEM_EXECBUFFER2 34 |
275,11 → 267,6 |
retval = i915_gem_mmap_ioctl(main_device, inp, file); |
break; |
case SRV_I915_GEM_MMAP_GTT: |
retval = i915_gem_mmap_gtt_ioctl(main_device, inp, file); |
break; |
case SRV_FBINFO: |
retval = i915_fbinfo(inp); |
break; |
/drivers/video/drm/i915/utils.c |
---|
4,7 → 4,6 |
#include <drm/i915_drm.h> |
#include "i915_drv.h" |
#include "intel_drv.h" |
#include <linux/hdmi.h> |
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags) |
103,410 → 102,3 |
if(filep->pages) |
kfree(filep->pages); |
} |
/** |
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe |
* @frame: HDMI AVI infoframe |
* |
* Returns 0 on success or a negative error code on failure. |
*/ |
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame) |
{ |
memset(frame, 0, sizeof(*frame)); |
frame->type = HDMI_INFOFRAME_TYPE_AVI; |
frame->version = 2; |
frame->length = 13; |
return 0; |
} |
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes) |
{ |
while (bytes) { |
if (*start != value) |
return (void *)start; |
start++; |
bytes--; |
} |
return NULL; |
} |
/** |
* memchr_inv - Find a non-matching character in an area of memory. |
* @start: The memory area |
* @c: Find a character other than c |
* @bytes: The size of the area. |
* |
* Returns the address of the first character other than @c, or %NULL |
* if the whole buffer contains just @c. |
*/ |
void *memchr_inv(const void *start, int c, size_t bytes) |
{ |
u8 value = c; |
u64 value64; |
unsigned int words, prefix; |
if (bytes <= 16) |
return check_bytes8(start, value, bytes); |
value64 = value; |
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 |
value64 *= 0x0101010101010101; |
#elif defined(ARCH_HAS_FAST_MULTIPLIER) |
value64 *= 0x01010101; |
value64 |= value64 << 32; |
#else |
value64 |= value64 << 8; |
value64 |= value64 << 16; |
value64 |= value64 << 32; |
#endif |
prefix = (unsigned long)start % 8; |
if (prefix) { |
u8 *r; |
prefix = 8 - prefix; |
r = check_bytes8(start, value, prefix); |
if (r) |
return r; |
start += prefix; |
bytes -= prefix; |
} |
words = bytes / 8; |
while (words) { |
if (*(u64 *)start != value64) |
return check_bytes8(start, value, 8); |
start += 8; |
words--; |
} |
return check_bytes8(start, value, bytes % 8); |
} |
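A typical use of memchr_inv() is an all-equal test; a minimal sketch with a hypothetical helper name: |
/* Sketch: memchr_inv() returns NULL iff every byte equals 'c', so |
 * an all-zero check for a buffer reduces to a single call. */ |
static bool buf_is_zeroed(const void *buf, size_t len) |
{ |
	return memchr_inv(buf, 0, len) == NULL; |
} |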
int dma_map_sg(struct device *dev, struct scatterlist *sglist, |
int nelems, int dir) |
{ |
struct scatterlist *s; |
int i; |
for_each_sg(sglist, s, nelems, i) { |
s->dma_address = (dma_addr_t)sg_phys(s); |
#ifdef CONFIG_NEED_SG_DMA_LENGTH |
s->dma_length = s->length; |
#endif |
} |
return nelems; |
} |
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) |
{ |
int i; |
i = vsnprintf(buf, size, fmt, args); |
if (likely(i < size)) |
return i; |
if (size != 0) |
return size - 1; |
return 0; |
} |
int scnprintf(char *buf, size_t size, const char *fmt, ...) |
{ |
va_list args; |
int i; |
va_start(args, fmt); |
i = vscnprintf(buf, size, fmt, args); |
va_end(args); |
return i; |
} |
#define _U 0x01 /* upper */ |
#define _L 0x02 /* lower */ |
#define _D 0x04 /* digit */ |
#define _C 0x08 /* cntrl */ |
#define _P 0x10 /* punct */ |
#define _S 0x20 /* white space (space/lf/tab) */ |
#define _X 0x40 /* hex digit */ |
#define _SP 0x80 /* hard space (0x20) */ |
extern const unsigned char _ctype[]; |
#define __ismask(x) (_ctype[(int)(unsigned char)(x)]) |
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0) |
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0) |
#define iscntrl(c) ((__ismask(c)&(_C)) != 0) |
#define isdigit(c) ((__ismask(c)&(_D)) != 0) |
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0) |
#define islower(c) ((__ismask(c)&(_L)) != 0) |
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0) |
#define ispunct(c) ((__ismask(c)&(_P)) != 0) |
/* Note: isspace() must return false for %NUL-terminator */ |
#define isspace(c) ((__ismask(c)&(_S)) != 0) |
#define isupper(c) ((__ismask(c)&(_U)) != 0) |
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0) |
#define isascii(c) (((unsigned char)(c))<=0x7f) |
#define toascii(c) (((unsigned char)(c))&0x7f) |
static inline unsigned char __tolower(unsigned char c) |
{ |
if (isupper(c)) |
c -= 'A'-'a'; |
return c; |
} |
static inline unsigned char __toupper(unsigned char c) |
{ |
if (islower(c)) |
c -= 'a'-'A'; |
return c; |
} |
#define tolower(c) __tolower(c) |
#define toupper(c) __toupper(c) |
/* |
* Fast implementation of tolower() for internal usage. Do not use in your |
* code. |
*/ |
static inline char _tolower(const char c) |
{ |
return c | 0x20; |
} |
//const char hex_asc[] = "0123456789abcdef"; |
/** |
* hex_to_bin - convert a hex digit to its real value |
* @ch: ascii character represents hex digit |
* |
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad |
* input. |
*/ |
int hex_to_bin(char ch) |
{ |
if ((ch >= '0') && (ch <= '9')) |
return ch - '0'; |
ch = tolower(ch); |
if ((ch >= 'a') && (ch <= 'f')) |
return ch - 'a' + 10; |
return -1; |
} |
EXPORT_SYMBOL(hex_to_bin); |
/** |
* hex2bin - convert an ascii hexadecimal string to its binary representation |
* @dst: binary result |
* @src: ascii hexadecimal string |
* @count: result length |
* |
* Return 0 on success, -1 in case of bad input. |
*/ |
int hex2bin(u8 *dst, const char *src, size_t count) |
{ |
while (count--) { |
int hi = hex_to_bin(*src++); |
int lo = hex_to_bin(*src++); |
if ((hi < 0) || (lo < 0)) |
return -1; |
*dst++ = (hi << 4) | lo; |
} |
return 0; |
} |
EXPORT_SYMBOL(hex2bin); |
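A short usage sketch for the two helpers above (hypothetical function and literal; the return convention is as documented): |
/* Sketch: parse an 8-digit hex string into 4 bytes with hex2bin(); |
 * hex2bin() consumes two source characters per output byte and |
 * returns 0 on success, -1 on any bad digit. */ |
static void example_parse_hex(void) |
{ |
	u8 bytes[4]; |
	if (hex2bin(bytes, "deadbeef", sizeof(bytes)) == 0) |
		printk("first byte: 0x%02x\n", bytes[0]); /* 0xde */ |
} |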
/** |
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory |
* @buf: data blob to dump |
* @len: number of bytes in the @buf |
* @rowsize: number of bytes to print per line; must be 16 or 32 |
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) |
* @linebuf: where to put the converted data |
* @linebuflen: total size of @linebuf, including space for terminating NUL |
* @ascii: include ASCII after the hex output |
* |
* hex_dump_to_buffer() works on one "line" of output at a time, i.e., |
* 16 or 32 bytes of input data converted to hex + ASCII output. |
* |
* Given a buffer of u8 data, hex_dump_to_buffer() converts the input data |
* to a hex + ASCII dump at the supplied memory location. |
* The converted output is always NUL-terminated. |
* |
* E.g.: |
* hex_dump_to_buffer(frame->data, frame->len, 16, 1, |
* linebuf, sizeof(linebuf), true); |
* |
* example output buffer: |
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO |
*/ |
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize, |
int groupsize, char *linebuf, size_t linebuflen, |
bool ascii) |
{ |
const u8 *ptr = buf; |
u8 ch; |
int j, lx = 0; |
int ascii_column; |
if (rowsize != 16 && rowsize != 32) |
rowsize = 16; |
if (!len) |
goto nil; |
if (len > rowsize) /* limit to one line at a time */ |
len = rowsize; |
if ((len % groupsize) != 0) /* no mixed size output */ |
groupsize = 1; |
switch (groupsize) { |
case 8: { |
const u64 *ptr8 = buf; |
int ngroups = len / groupsize; |
for (j = 0; j < ngroups; j++) |
lx += scnprintf(linebuf + lx, linebuflen - lx, |
"%s%16.16llx", j ? " " : "", |
(unsigned long long)*(ptr8 + j)); |
ascii_column = 17 * ngroups + 2; |
break; |
} |
case 4: { |
const u32 *ptr4 = buf; |
int ngroups = len / groupsize; |
for (j = 0; j < ngroups; j++) |
lx += scnprintf(linebuf + lx, linebuflen - lx, |
"%s%8.8x", j ? " " : "", *(ptr4 + j)); |
ascii_column = 9 * ngroups + 2; |
break; |
} |
case 2: { |
const u16 *ptr2 = buf; |
int ngroups = len / groupsize; |
for (j = 0; j < ngroups; j++) |
lx += scnprintf(linebuf + lx, linebuflen - lx, |
"%s%4.4x", j ? " " : "", *(ptr2 + j)); |
ascii_column = 5 * ngroups + 2; |
break; |
} |
default: |
for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { |
ch = ptr[j]; |
linebuf[lx++] = hex_asc_hi(ch); |
linebuf[lx++] = hex_asc_lo(ch); |
linebuf[lx++] = ' '; |
} |
if (j) |
lx--; |
ascii_column = 3 * rowsize + 2; |
break; |
} |
if (!ascii) |
goto nil; |
while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) |
linebuf[lx++] = ' '; |
for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) { |
ch = ptr[j]; |
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.'; |
} |
nil: |
linebuf[lx++] = '\0'; |
} |
/** |
* print_hex_dump - print a text hex dump to syslog for a binary blob of data |
* @level: kernel log level (e.g. KERN_DEBUG) |
* @prefix_str: string to prefix each line with; |
* caller supplies trailing spaces for alignment if desired |
* @prefix_type: controls whether prefix of an offset, address, or none |
* is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE) |
* @rowsize: number of bytes to print per line; must be 16 or 32 |
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1) |
* @buf: data blob to dump |
* @len: number of bytes in the @buf |
* @ascii: include ASCII after the hex output |
* |
* Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump |
* to the kernel log at the specified kernel log level, with an optional |
* leading prefix. |
* |
* print_hex_dump() works on one "line" of output at a time, i.e., |
* 16 or 32 bytes of input data converted to hex + ASCII output. |
* print_hex_dump() iterates over the entire input @buf, breaking it into |
* "line size" chunks to format and print. |
* |
* E.g.: |
* print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS, |
* 16, 1, frame->data, frame->len, true); |
* |
* Example output using %DUMP_PREFIX_OFFSET and 1-byte mode: |
* 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO |
* Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode: |
* ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~. |
*/ |
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type, |
int rowsize, int groupsize, |
const void *buf, size_t len, bool ascii) |
{ |
const u8 *ptr = buf; |
int i, linelen, remaining = len; |
unsigned char linebuf[32 * 3 + 2 + 32 + 1]; |
if (rowsize != 16 && rowsize != 32) |
rowsize = 16; |
for (i = 0; i < len; i += rowsize) { |
linelen = min(remaining, rowsize); |
remaining -= rowsize; |
hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, |
linebuf, sizeof(linebuf), ascii); |
switch (prefix_type) { |
case DUMP_PREFIX_ADDRESS: |
printk("%s%s%p: %s\n", |
level, prefix_str, ptr + i, linebuf); |
break; |
case DUMP_PREFIX_OFFSET: |
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); |
break; |
default: |
printk("%s%s%s\n", level, prefix_str, linebuf); |
break; |
} |
} |
} |
void print_hex_dump_bytes(const char *prefix_str, int prefix_type, |
const void *buf, size_t len) |
{ |
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1, |
buf, len, true); |
} |
/drivers/video/drm/i915/i915_gem_execbuffer.c |
---|
63,43 → 63,25 |
} |
struct eb_objects { |
struct list_head objects; |
int and; |
union { |
struct drm_i915_gem_object *lut[0]; |
struct hlist_head buckets[0]; |
}; |
}; |
static struct eb_objects * |
eb_create(struct drm_i915_gem_execbuffer2 *args) |
eb_create(int size) |
{ |
struct eb_objects *eb = NULL; |
if (args->flags & I915_EXEC_HANDLE_LUT) { |
int size = args->buffer_count; |
size *= sizeof(struct drm_i915_gem_object *); |
size += sizeof(struct eb_objects); |
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); |
} |
if (eb == NULL) { |
int size = args->buffer_count; |
struct eb_objects *eb; |
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2; |
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head)); |
while (count > 2*size) |
BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head))); |
while (count > size) |
count >>= 1; |
eb = kzalloc(count*sizeof(struct hlist_head) + |
sizeof(struct eb_objects), |
GFP_TEMPORARY); |
GFP_KERNEL); |
if (eb == NULL) |
return eb; |
eb->and = count - 1; |
} else |
eb->and = -args->buffer_count; |
INIT_LIST_HEAD(&eb->objects); |
return eb; |
} |
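The sizing loop above keeps the bucket count a power of two and stores count - 1 in eb->and, so bucket selection later (eb_add_object/eb_get_object) is a single mask. A worked example with assumed sizes, purely illustrative: |
/* Sketch: with PAGE_SIZE 4096 and an assumed 8-byte hlist_head, |
 * the cap is 4096 / 8 / 2 = 256 buckets, shrunk while still larger |
 * than the object count; a handle then hashes with one AND: |
 * |
 *	count = 256;			// power of two by construction |
 *	eb->and = count - 1;		// 0xff |
 *	bucket = handle & eb->and;	// e.g. handle 42 -> bucket 42 |
 */ |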
106,93 → 88,36 |
static void |
eb_reset(struct eb_objects *eb) |
{ |
if (eb->and >= 0) |
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head)); |
} |
static int |
eb_lookup_objects(struct eb_objects *eb, |
struct drm_i915_gem_exec_object2 *exec, |
const struct drm_i915_gem_execbuffer2 *args, |
struct drm_file *file) |
static void |
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj) |
{ |
int i; |
spin_lock(&file->table_lock); |
for (i = 0; i < args->buffer_count; i++) { |
struct drm_i915_gem_object *obj; |
if(exec[i].handle == -2) |
obj = get_fb_obj(); |
else |
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle)); |
if (obj == NULL) { |
spin_unlock(&file->table_lock); |
DRM_DEBUG("Invalid object handle %d at index %d\n", |
exec[i].handle, i); |
return -ENOENT; |
} |
if (!list_empty(&obj->exec_list)) { |
spin_unlock(&file->table_lock); |
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", |
obj, exec[i].handle, i); |
return -EINVAL; |
} |
drm_gem_object_reference(&obj->base); |
list_add_tail(&obj->exec_list, &eb->objects); |
obj->exec_entry = &exec[i]; |
if (eb->and < 0) { |
eb->lut[i] = obj; |
} else { |
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle; |
obj->exec_handle = handle; |
hlist_add_head(&obj->exec_node, |
&eb->buckets[handle & eb->and]); |
&eb->buckets[obj->exec_handle & eb->and]); |
} |
} |
spin_unlock(&file->table_lock); |
return 0; |
} |
static struct drm_i915_gem_object * |
eb_get_object(struct eb_objects *eb, unsigned long handle) |
{ |
if (eb->and < 0) { |
if (handle >= -eb->and) |
return NULL; |
return eb->lut[handle]; |
} else { |
struct hlist_head *head; |
struct hlist_node *node; |
struct drm_i915_gem_object *obj; |
head = &eb->buckets[handle & eb->and]; |
hlist_for_each(node, head) { |
struct drm_i915_gem_object *obj; |
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node); |
if (obj->exec_handle == handle) |
return obj; |
} |
return NULL; |
} |
} |
static void |
eb_destroy(struct eb_objects *eb) |
{ |
while (!list_empty(&eb->objects)) { |
struct drm_i915_gem_object *obj; |
obj = list_first_entry(&eb->objects, |
struct drm_i915_gem_object, |
exec_list); |
list_del_init(&obj->exec_list); |
drm_gem_object_unreference(&obj->base); |
} |
kfree(eb); |
} |
254,6 → 179,17 |
reloc->write_domain); |
return ret; |
} |
if (unlikely(reloc->write_domain && target_obj->pending_write_domain && |
reloc->write_domain != target_obj->pending_write_domain)) { |
DRM_DEBUG("Write domain conflict: " |
"obj %p target %d offset %d " |
"new %08x old %08x\n", |
obj, reloc->target_handle, |
(int) reloc->offset, |
reloc->write_domain, |
target_obj->pending_write_domain); |
return ret; |
} |
target_obj->pending_read_domains |= reloc->read_domains; |
target_obj->pending_write_domain |= reloc->write_domain; |
282,7 → 218,10 |
} |
/* We can't wait for rendering with pagefaults disabled */ |
// if (obj->active && in_atomic()) |
// return -EFAULT; |
reloc->delta += target_offset; |
if (use_cpu_reloc(obj)) { |
uint32_t page_offset = reloc->offset & ~PAGE_MASK; |
385,7 → 324,8 |
static int |
i915_gem_execbuffer_relocate(struct drm_device *dev, |
struct eb_objects *eb) |
struct eb_objects *eb, |
struct list_head *objects) |
{ |
struct drm_i915_gem_object *obj; |
int ret = 0; |
398,7 → 338,7 |
* lockdep complains vehemently. |
*/ |
// pagefault_disable(); |
list_for_each_entry(obj, &eb->objects, exec_list) { |
list_for_each_entry(obj, objects, exec_list) { |
ret = i915_gem_execbuffer_relocate_object(obj, eb); |
if (ret) |
break; |
420,8 → 360,7 |
static int |
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj, |
struct intel_ring_buffer *ring, |
bool *need_reloc) |
struct intel_ring_buffer *ring) |
{ |
struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry; |
470,20 → 409,9 |
obj->has_aliasing_ppgtt_mapping = 1; |
} |
if (entry->offset != obj->gtt_offset) { |
entry->offset = obj->gtt_offset; |
*need_reloc = true; |
} |
// LEAVE(); |
if (entry->flags & EXEC_OBJECT_WRITE) { |
obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER; |
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER; |
} |
if (entry->flags & EXEC_OBJECT_NEEDS_GTT && |
!obj->has_global_gtt_mapping) |
i915_gem_gtt_bind_object(obj, obj->cache_level); |
return 0; |
} |
509,8 → 437,7 |
static int |
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring, |
struct drm_file *file, |
struct list_head *objects, |
bool *need_relocs) |
struct list_head *objects) |
{ |
struct drm_i915_gem_object *obj; |
struct list_head ordered_objects; |
540,7 → 467,7 |
else |
list_move_tail(&obj->exec_list, &ordered_objects); |
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND; |
obj->base.pending_read_domains = 0; |
obj->base.pending_write_domain = 0; |
obj->pending_fenced_gpu_access = false; |
} |
580,7 → 507,7 |
(need_mappable && !obj->map_and_fenceable)) |
ret = i915_gem_object_unbind(obj); |
else |
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); |
ret = i915_gem_execbuffer_reserve_object(obj, ring); |
if (ret) |
goto err; |
} |
590,7 → 517,7 |
if (obj->gtt_space) |
continue; |
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs); |
ret = i915_gem_execbuffer_reserve_object(obj, ring); |
if (ret) |
goto err; |
} |
613,22 → 540,21 |
static int |
i915_gem_execbuffer_relocate_slow(struct drm_device *dev, |
struct drm_i915_gem_execbuffer2 *args, |
struct drm_file *file, |
struct intel_ring_buffer *ring, |
struct list_head *objects, |
struct eb_objects *eb, |
struct drm_i915_gem_exec_object2 *exec) |
struct drm_i915_gem_exec_object2 *exec, |
int count) |
{ |
struct drm_i915_gem_relocation_entry *reloc; |
struct drm_i915_gem_object *obj; |
bool need_relocs; |
int *reloc_offset; |
int i, total, ret; |
int count = args->buffer_count; |
/* We may process another execbuffer during the unlock... */ |
while (!list_empty(&eb->objects)) { |
obj = list_first_entry(&eb->objects, |
while (!list_empty(objects)) { |
obj = list_first_entry(objects, |
struct drm_i915_gem_object, |
exec_list); |
list_del_init(&obj->exec_list); |
696,16 → 622,34 |
/* reacquire the objects */ |
eb_reset(eb); |
ret = eb_lookup_objects(eb, exec, args, file); |
if (ret) |
for (i = 0; i < count; i++) { |
if(exec[i].handle == -2) |
{ |
obj = get_fb_obj(); |
drm_gem_object_reference(&obj->base); |
} |
else |
obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
exec[i].handle)); |
if (&obj->base == NULL) { |
DRM_DEBUG("Invalid object handle %d at index %d\n", |
exec[i].handle, i); |
ret = -ENOENT; |
goto err; |
} |
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); |
list_add_tail(&obj->exec_list, objects); |
obj->exec_handle = exec[i].handle; |
obj->exec_entry = &exec[i]; |
eb_add_object(eb, obj); |
} |
ret = i915_gem_execbuffer_reserve(ring, file, objects); |
if (ret) |
goto err; |
list_for_each_entry(obj, &eb->objects, exec_list) { |
list_for_each_entry(obj, objects, exec_list) { |
int offset = obj->exec_entry - exec; |
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb, |
reloc + reloc_offset[offset]); |
726,11 → 670,44 |
} |
static int |
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips) |
{ |
u32 plane, flip_mask; |
int ret; |
/* Check for any pending flips. As we only maintain a flip queue depth |
* of 1, we can simply insert a WAIT for the next display flip prior |
* to executing the batch and avoid stalling the CPU. |
*/ |
for (plane = 0; flips >> plane; plane++) { |
if (((flips >> plane) & 1) == 0) |
continue; |
if (plane) |
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; |
else |
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; |
ret = intel_ring_begin(ring, 2); |
if (ret) |
return ret; |
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask); |
intel_ring_emit(ring, MI_NOOP); |
intel_ring_advance(ring); |
} |
return 0; |
} |
static int |
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring, |
struct list_head *objects) |
{ |
struct drm_i915_gem_object *obj; |
uint32_t flush_domains = 0; |
uint32_t flips = 0; |
int ret; |
list_for_each_entry(obj, objects, exec_list) { |
741,9 → 718,18 |
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU) |
i915_gem_clflush_object(obj); |
if (obj->base.pending_write_domain) |
flips |= atomic_read(&obj->pending_flip); |
flush_domains |= obj->base.write_domain; |
} |
if (flips) { |
ret = i915_gem_execbuffer_wait_for_flips(ring, flips); |
if (ret) |
return ret; |
} |
if (flush_domains & I915_GEM_DOMAIN_CPU) |
i915_gem_chipset_flush(ring->dev); |
759,9 → 745,6 |
static bool |
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec) |
{ |
if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) |
return false; |
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; |
} |
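The single-expression test above relies on OR-folding: if either field has any of its low three bits set, the OR does too, so one mask check verifies the 8-byte alignment of both values at once. With illustrative numbers: |
/* Sketch: (0x1000 | 0x40) & 7 == 0 -> both aligned, accepted; |
 * (0x1000 | 0x41) & 7 == 1 -> misaligned length, rejected. */ |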
770,26 → 753,21 |
int count) |
{ |
int i; |
int relocs_total = 0; |
int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry); |
for (i = 0; i < count; i++) { |
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr; |
int length; /* limited by fault_in_pages_readable() */ |
if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS) |
/* First check for malicious input causing overflow */ |
if (exec[i].relocation_count > |
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry)) |
return -EINVAL; |
/* First check for malicious input causing overflow in |
* the worst case where we need to allocate the entire |
* relocation tree as a single array. |
*/ |
if (exec[i].relocation_count > relocs_max - relocs_total) |
return -EINVAL; |
relocs_total += exec[i].relocation_count; |
length = exec[i].relocation_count * |
sizeof(struct drm_i915_gem_relocation_entry); |
// if (!access_ok(VERIFY_READ, ptr, length)) |
// return -EFAULT; |
/* we may also need to update the presumed offsets */ |
// if (!access_ok(VERIFY_WRITE, ptr, length)) |
// return -EFAULT; |
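/* The bound above guards the multiplication relocation_count * |
 * sizeof(entry) against int overflow. Hypothetical numbers, with |
 * sizeof(struct drm_i915_gem_relocation_entry) assumed to be 32: |
 * |
 *	relocs_max = INT_MAX / 32;	// largest count whose byte |
 *					// size still fits in an int |
 * |
 * Any count above relocs_max would wrap 'length' around, so it is |
 * rejected with -EINVAL before the multiply ever happens. */ |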
811,10 → 789,8 |
u32 old_read = obj->base.read_domains; |
u32 old_write = obj->base.write_domain; |
obj->base.read_domains = obj->base.pending_read_domains; |
obj->base.write_domain = obj->base.pending_write_domain; |
if (obj->base.write_domain == 0) |
obj->base.pending_read_domains |= obj->base.read_domains; |
obj->base.read_domains = obj->base.pending_read_domains; |
obj->fenced_gpu_access = obj->pending_fenced_gpu_access; |
i915_gem_object_move_to_active(obj, ring); |
873,6 → 849,7 |
struct drm_i915_gem_exec_object2 *exec) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct list_head objects; |
struct eb_objects *eb; |
struct drm_i915_gem_object *batch_obj; |
struct drm_clip_rect *cliprects = NULL; |
879,12 → 856,12 |
struct intel_ring_buffer *ring; |
u32 ctx_id = i915_execbuffer2_get_context_id(*args); |
u32 exec_start, exec_len; |
u32 mask, flags; |
u32 mask; |
u32 flags; |
int ret, mode, i; |
bool need_relocs; |
if (!i915_gem_check_execbuffer(args)) |
{ |
if (!i915_gem_check_execbuffer(args)) { |
DRM_DEBUG("execbuf with invalid offset/length\n"); |
FAIL(); |
return -EINVAL; |
} |
898,6 → 875,8 |
flags = 0; |
if (args->flags & I915_EXEC_SECURE) { |
// if (!file->is_master || !capable(CAP_SYS_ADMIN)) |
// return -EPERM; |
flags |= I915_DISPATCH_SECURE; |
} |
1010,7 → 989,7 |
goto pre_mutex_err; |
} |
eb = eb_create(args); |
eb = eb_create(args->buffer_count); |
if (eb == NULL) { |
mutex_unlock(&dev->struct_mutex); |
ret = -ENOMEM; |
1018,28 → 997,60 |
} |
/* Look up object handles */ |
ret = eb_lookup_objects(eb, exec, args, file); |
if (ret) |
INIT_LIST_HEAD(&objects); |
for (i = 0; i < args->buffer_count; i++) { |
struct drm_i915_gem_object *obj; |
if(exec[i].handle == -2) |
{ |
obj = get_fb_obj(); |
drm_gem_object_reference(&obj->base); |
} |
else |
obj = to_intel_bo(drm_gem_object_lookup(dev, file, |
exec[i].handle)); |
// printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle); |
if (&obj->base == NULL) { |
DRM_DEBUG("Invalid object handle %d at index %d\n", |
exec[i].handle, i); |
/* prevent error path from reading uninitialized data */ |
ret = -ENOENT; |
goto err; |
} |
if (!list_empty(&obj->exec_list)) { |
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n", |
obj, exec[i].handle, i); |
ret = -EINVAL; |
goto err; |
} |
list_add_tail(&obj->exec_list, &objects); |
obj->exec_handle = exec[i].handle; |
obj->exec_entry = &exec[i]; |
eb_add_object(eb, obj); |
} |
/* take note of the batch buffer before we might reorder the lists */ |
batch_obj = list_entry(eb->objects.prev, |
batch_obj = list_entry(objects.prev, |
struct drm_i915_gem_object, |
exec_list); |
/* Move the objects en-masse into the GTT, evicting if necessary. */ |
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0; |
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs); |
ret = i915_gem_execbuffer_reserve(ring, file, &objects); |
if (ret) |
goto err; |
/* The objects are in their final locations, apply the relocations. */ |
if (need_relocs) |
ret = i915_gem_execbuffer_relocate(dev, eb); |
ret = i915_gem_execbuffer_relocate(dev, eb, &objects); |
if (ret) { |
if (ret == -EFAULT) { |
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring, |
eb, exec); |
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring, |
&objects, eb, |
exec, |
args->buffer_count); |
BUG_ON(!mutex_is_locked(&dev->struct_mutex)); |
} |
if (ret) |
1061,7 → 1072,7 |
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping) |
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level); |
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects); |
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects); |
if (ret) |
goto err; |
1093,7 → 1104,18 |
exec_start = batch_obj->gtt_offset + args->batch_start_offset; |
exec_len = args->batch_len; |
if (cliprects) { |
// for (i = 0; i < args->num_cliprects; i++) { |
// ret = i915_emit_box(dev, &cliprects[i], |
// args->DR1, args->DR4); |
// if (ret) |
// goto err; |
// ret = ring->dispatch_execbuffer(ring, |
// exec_start, exec_len, |
// flags); |
// if (ret) |
// goto err; |
// } |
} else { |
ret = ring->dispatch_execbuffer(ring, |
exec_start, exec_len, |
1104,21 → 1126,30 |
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags); |
i915_gem_execbuffer_move_to_active(&eb->objects, ring); |
i915_gem_execbuffer_move_to_active(&objects, ring); |
i915_gem_execbuffer_retire_commands(dev, file, ring); |
err: |
eb_destroy(eb); |
while (!list_empty(&objects)) { |
struct drm_i915_gem_object *obj; |
obj = list_first_entry(&objects, |
struct drm_i915_gem_object, |
exec_list); |
list_del_init(&obj->exec_list); |
drm_gem_object_unreference(&obj->base); |
} |
mutex_unlock(&dev->struct_mutex); |
pre_mutex_err: |
kfree(cliprects); |
return ret; |
} |
int |
i915_gem_execbuffer2(struct drm_device *dev, void *data, |
struct drm_file *file) |
1136,8 → 1167,11 |
return -EINVAL; |
} |
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count, |
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); |
exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count); |
// if (exec2_list == NULL) |
// exec2_list = drm_malloc_ab(sizeof(*exec2_list), |
// args->buffer_count); |
if (exec2_list == NULL) { |
DRM_DEBUG("Failed to allocate exec list for %d buffers\n", |
args->buffer_count); |
/drivers/video/drm/i915/intel_display.c |
---|
166,8 → 166,8 |
.vco = { .min = 1400000, .max = 2800000 }, |
.n = { .min = 1, .max = 6 }, |
.m = { .min = 70, .max = 120 }, |
.m1 = { .min = 8, .max = 18 }, |
.m2 = { .min = 3, .max = 7 }, |
.m1 = { .min = 10, .max = 22 }, |
.m2 = { .min = 5, .max = 9 }, |
.p = { .min = 5, .max = 80 }, |
.p1 = { .min = 1, .max = 8 }, |
.p2 = { .dot_limit = 200000, |
180,8 → 180,8 |
.vco = { .min = 1400000, .max = 2800000 }, |
.n = { .min = 1, .max = 6 }, |
.m = { .min = 70, .max = 120 }, |
.m1 = { .min = 8, .max = 18 }, |
.m2 = { .min = 3, .max = 7 }, |
.m1 = { .min = 10, .max = 22 }, |
.m2 = { .min = 5, .max = 9 }, |
.p = { .min = 7, .max = 98 }, |
.p1 = { .min = 1, .max = 8 }, |
.p2 = { .dot_limit = 112000, |
428,11 → 428,13 |
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg) |
{ |
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
unsigned long flags; |
u32 val = 0; |
spin_lock_irqsave(&dev_priv->dpio_lock, flags); |
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
DRM_ERROR("DPIO idle wait timed out\n"); |
return 0; |
goto out_unlock; |
} |
I915_WRITE(DPIO_REG, reg); |
440,20 → 442,24 |
DPIO_BYTE); |
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
DRM_ERROR("DPIO read wait timed out\n"); |
return 0; |
goto out_unlock; |
} |
val = I915_READ(DPIO_DATA); |
return I915_READ(DPIO_DATA); |
out_unlock: |
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); |
return val; |
} |
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg, |
u32 val) |
{ |
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
unsigned long flags; |
spin_lock_irqsave(&dev_priv->dpio_lock, flags); |
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
DRM_ERROR("DPIO idle wait timed out\n"); |
return; |
goto out_unlock; |
} |
I915_WRITE(DPIO_DATA, val); |
462,6 → 468,9 |
DPIO_BYTE); |
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) |
DRM_ERROR("DPIO write wait timed out\n"); |
out_unlock: |
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); |
} |
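Both DPIO accessors now route every exit through out_unlock, so the spinlock taken at entry is dropped on the timeout paths as well as on success; the early returns they replace would have left dpio_lock held forever. The shape of that pattern, reduced to a kernel-flavored sketch in which struct device_state, wait_until_idle() and hw_read() are placeholders:

#include <linux/errno.h>
#include <linux/spinlock.h>

static int guarded_read(struct device_state *st, int reg, u32 *out)
{
	unsigned long flags;
	int ret = -ETIMEDOUT;

	spin_lock_irqsave(&st->lock, flags);
	if (!wait_until_idle(st))		/* placeholder poll helper */
		goto out_unlock;		/* still releases the lock */
	*out = hw_read(st, reg);		/* placeholder register read */
	ret = 0;
out_unlock:
	spin_unlock_irqrestore(&st->lock, flags);
	return ret;
}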
static void vlv_init_dpio(struct drm_device *dev) |
475,14 → 484,61 |
POSTING_READ(DPIO_CTL); |
} |
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) |
{ |
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); |
return 1; |
} |
static const struct dmi_system_id intel_dual_link_lvds[] = { |
{ |
.callback = intel_dual_link_lvds_callback, |
.ident = "Apple MacBook Pro (Core i5/i7 Series)", |
.matches = { |
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), |
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"), |
}, |
}, |
{ } /* terminating entry */ |
}; |
static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, |
unsigned int reg) |
{ |
unsigned int val; |
/* use the module option value if specified */ |
if (i915_lvds_channel_mode > 0) |
return i915_lvds_channel_mode == 2; |
// if (dmi_check_system(intel_dual_link_lvds)) |
// return true; |
if (dev_priv->lvds_val) |
val = dev_priv->lvds_val; |
else { |
/* BIOS should set the proper LVDS register value at boot, but |
* in reality, it doesn't set the value when the lid is closed; |
* we need to check "the value to be set" in VBT when LVDS |
* register is uninitialized. |
*/ |
val = I915_READ(reg); |
if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) |
val = dev_priv->bios_lvds_val; |
dev_priv->lvds_val = val; |
} |
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; |
} |
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
int refclk) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
const intel_limit_t *limit; |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
if (intel_is_dual_link_lvds(dev)) { |
if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { |
/* LVDS dual channel */ |
if (refclk == 100000) |
limit = &intel_limits_ironlake_dual_lvds_100m; |
506,10 → 562,11 |
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
const intel_limit_t *limit; |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
if (intel_is_dual_link_lvds(dev)) |
if (is_dual_link_lvds(dev_priv, LVDS)) |
/* LVDS with dual channel */ |
limit = &intel_limits_g4x_dual_channel_lvds; |
else |
641,16 → 698,19 |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
intel_clock_t clock; |
int err = target; |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
(I915_READ(LVDS)) != 0) { |
/* |
* For LVDS just rely on its current settings for dual-channel. |
* We haven't figured out how to reliably set up different |
* single/dual channel state, if we even can. |
* For LVDS, if the panel is on, just rely on its current |
* settings for dual-channel. We haven't figured out how to |
* reliably set up different single/dual channel state, if we |
* even can. |
*/ |
if (intel_is_dual_link_lvds(dev)) |
if (is_dual_link_lvds(dev_priv, LVDS)) |
clock.p2 = limit->p2.p2_fast; |
else |
clock.p2 = limit->p2.p2_slow; |
703,6 → 763,7 |
intel_clock_t *best_clock) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
intel_clock_t clock; |
int max_n; |
bool found; |
717,7 → 778,8 |
lvds_reg = PCH_LVDS; |
else |
lvds_reg = LVDS; |
if (intel_is_dual_link_lvds(dev)) |
if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == |
LVDS_CLKB_POWER_UP) |
clock.p2 = limit->p2.p2_fast; |
else |
clock.p2 = limit->p2.p2_slow; |
997,51 → 1059,6 |
} |
} |
/* |
* ibx_digital_port_connected - is the specified port connected? |
* @dev_priv: i915 private structure |
* @port: the port to test |
* |
* Returns true if @port is connected, false otherwise. |
*/ |
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, |
struct intel_digital_port *port) |
{ |
u32 bit; |
if (HAS_PCH_IBX(dev_priv->dev)) { |
switch(port->port) { |
case PORT_B: |
bit = SDE_PORTB_HOTPLUG; |
break; |
case PORT_C: |
bit = SDE_PORTC_HOTPLUG; |
break; |
case PORT_D: |
bit = SDE_PORTD_HOTPLUG; |
break; |
default: |
return true; |
} |
} else { |
switch(port->port) { |
case PORT_B: |
bit = SDE_PORTB_HOTPLUG_CPT; |
break; |
case PORT_C: |
bit = SDE_PORTC_HOTPLUG_CPT; |
break; |
case PORT_D: |
bit = SDE_PORTD_HOTPLUG_CPT; |
break; |
default: |
return true; |
} |
} |
return I915_READ(SDEISR) & bit; |
} |
static const char *state_string(bool enabled) |
{ |
return enabled ? "on" : "off"; |
1120,8 → 1137,8 |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
if (HAS_DDI(dev_priv->dev)) { |
/* DDI does not have a specific FDI_TX register */ |
if (IS_HASWELL(dev_priv->dev)) { |
/* On Haswell, DDI is used instead of FDI_TX_CTL */ |
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder); |
val = I915_READ(reg); |
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE); |
1165,7 → 1182,7 |
return; |
/* On Haswell, DDI ports are responsible for the FDI PLL setup */ |
if (HAS_DDI(dev_priv->dev)) |
if (IS_HASWELL(dev_priv->dev)) |
return; |
reg = FDI_TX_CTL(pipe); |
1226,15 → 1243,9 |
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) |
state = true; |
if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP && |
!(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) { |
cur_state = false; |
} else { |
reg = PIPECONF(cpu_transcoder); |
val = I915_READ(reg); |
cur_state = !!(val & PIPECONF_ENABLE); |
} |
WARN(cur_state != state, |
"pipe %c assertion failure (expected %s, current %s)\n", |
pipe_name(pipe), state_string(state), state_string(cur_state)); |
1510,14 → 1521,13 |
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value, |
enum intel_sbi_destination destination) |
{ |
unsigned long flags; |
u32 tmp; |
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
100)) { |
spin_lock_irqsave(&dev_priv->dpio_lock, flags); |
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { |
DRM_ERROR("timeout waiting for SBI to become ready\n"); |
return; |
goto out_unlock; |
} |
I915_WRITE(SBI_ADDR, (reg << 16)); |
1532,8 → 1542,11 |
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
100)) { |
DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); |
return; |
goto out_unlock; |
} |
out_unlock: |
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); |
} |
static u32 |
1540,13 → 1553,13 |
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg, |
enum intel_sbi_destination destination) |
{ |
unsigned long flags; |
u32 value = 0; |
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock)); |
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, |
100)) { |
spin_lock_irqsave(&dev_priv->dpio_lock, flags); |
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) { |
DRM_ERROR("timeout waiting for SBI to become ready\n"); |
return 0; |
goto out_unlock; |
} |
I915_WRITE(SBI_ADDR, (reg << 16)); |
1560,10 → 1573,14 |
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, |
100)) { |
DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); |
return 0; |
goto out_unlock; |
} |
return I915_READ(SBI_DATA); |
value = I915_READ(SBI_DATA); |
out_unlock: |
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags); |
return value; |
} |
/** |
1695,8 → 1712,8 |
* make the BPC in transcoder be consistent with |
* that in pipeconf reg. |
*/ |
val &= ~PIPECONF_BPC_MASK; |
val |= pipeconf_val & PIPECONF_BPC_MASK; |
val &= ~PIPE_BPC_MASK; |
val |= pipeconf_val & PIPE_BPC_MASK; |
} |
val &= ~TRANS_INTERLACE_MASK; |
1723,7 → 1740,7 |
BUG_ON(dev_priv->info->gen < 5); |
/* FDI must be feeding us bits for PCH ports */ |
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder); |
assert_fdi_tx_enabled(dev_priv, cpu_transcoder); |
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); |
/* Workaround: set timing override bit. */ |
1811,11 → 1828,11 |
{ |
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
pipe); |
enum pipe pch_transcoder; |
enum transcoder pch_transcoder; |
int reg; |
u32 val; |
if (HAS_PCH_LPT(dev_priv->dev)) |
if (IS_HASWELL(dev_priv->dev)) |
pch_transcoder = TRANSCODER_A; |
else |
pch_transcoder = pipe; |
1831,8 → 1848,7 |
if (pch_port) { |
/* if driving the PCH, we need FDI enabled */ |
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder); |
assert_fdi_tx_pll_enabled(dev_priv, |
(enum pipe) cpu_transcoder); |
assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder); |
} |
/* FIXME: assert CPU port conditions for SNB+ */ |
} |
1989,12 → 2005,7 |
* framebuffer compression. For simplicity, we always install |
* a fence as the cost is not that onerous. |
*/ |
ret = i915_gem_object_get_fence(obj); |
if (ret) |
goto err_unpin; |
i915_gem_object_pin_fence(obj); |
dev_priv->mm.interruptible = true; |
return 0; |
2013,30 → 2024,19 |
/* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel |
* is assumed to be a power-of-two. */ |
unsigned long intel_gen4_compute_page_offset(int *x, int *y, |
unsigned int tiling_mode, |
unsigned int cpp, |
unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, |
unsigned int bpp, |
unsigned int pitch) |
{ |
if (tiling_mode != I915_TILING_NONE) { |
unsigned int tile_rows, tiles; |
int tile_rows, tiles; |
tile_rows = *y / 8; |
*y %= 8; |
tiles = *x / (512/bpp); |
*x %= 512/bpp; |
tiles = *x / (512/cpp); |
*x %= 512/cpp; |
return tile_rows * pitch * 8 + tiles * 4096; |
} else { |
unsigned int offset; |
offset = *y * pitch + *x * cpp; |
*y = 0; |
*x = (offset & 4095) / cpp; |
return offset & -4096; |
} |
} |
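To make the X-tiling arithmetic concrete (illustrative numbers, not hardware state): with 4 bytes per pixel and a 4096-byte pitch, an X tile is 512 bytes (128 pixels) wide and 8 rows tall, so a pixel at (700, 20) sits in tile row 20 / 8 = 2 and tile column 700 / 128 = 5. The function then returns 2 * 4096 * 8 + 5 * 4096 = 86016, a page-aligned offset to the base tile, and rewrites the coordinates to (700 % 128, 20 % 8) = (60, 4) relative to that tile.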
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
int x, int y) |
2112,7 → 2112,7 |
if (INTEL_INFO(dev)->gen >= 4) { |
intel_crtc->dspaddr_offset = |
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, |
intel_gen4_compute_offset_xtiled(&x, &y, |
fb->bits_per_pixel / 8, |
fb->pitches[0]); |
linear_offset -= intel_crtc->dspaddr_offset; |
2193,9 → 2193,9 |
return -EINVAL; |
} |
if (obj->tiling_mode != I915_TILING_NONE) |
dspcntr |= DISPPLANE_TILED; |
else |
// if (obj->tiling_mode != I915_TILING_NONE) |
// dspcntr |= DISPPLANE_TILED; |
// else |
dspcntr &= ~DISPPLANE_TILED; |
/* must disable */ |
2205,7 → 2205,7 |
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8); |
intel_crtc->dspaddr_offset = |
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, |
intel_gen4_compute_offset_xtiled(&x, &y, |
fb->bits_per_pixel / 8, |
fb->pitches[0]); |
linear_offset -= intel_crtc->dspaddr_offset; |
2250,6 → 2250,10 |
bool was_interruptible = dev_priv->mm.interruptible; |
int ret; |
wait_event(dev_priv->pending_flip_queue, |
atomic_read(&dev_priv->mm.wedged) || |
atomic_read(&obj->pending_flip) == 0); |
/* Big Hammer, we also need to ensure that any pending |
* MI_WAIT_FOR_EVENT inside a user batch buffer on the |
* current scanout is retired before unpinning the old |
2326,6 → 2330,43 |
return 0; |
} |
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 dpa_ctl; |
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); |
dpa_ctl = I915_READ(DP_A); |
dpa_ctl &= ~DP_PLL_FREQ_MASK; |
if (clock < 200000) { |
u32 temp; |
dpa_ctl |= DP_PLL_FREQ_160MHZ; |
/* workaround for 160MHz: |
1) program 0x4600c bits 15:0 = 0x8124 |
2) program 0x46010 bit 0 = 1 |
3) program 0x46034 bit 24 = 1 |
4) program 0x64000 bit 14 = 1 |
*/ |
temp = I915_READ(0x4600c); |
temp &= 0xffff0000; |
I915_WRITE(0x4600c, temp | 0x8124); |
temp = I915_READ(0x46010); |
I915_WRITE(0x46010, temp | 1); |
temp = I915_READ(0x46034); |
I915_WRITE(0x46034, temp | (1 << 24)); |
} else { |
dpa_ctl |= DP_PLL_FREQ_270MHZ; |
} |
I915_WRITE(DP_A, dpa_ctl); |
POSTING_READ(DP_A); |
udelay(500); |
} |
static void intel_fdi_normal_train(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
2754,7 → 2795,7 |
temp = I915_READ(reg); |
temp &= ~((0x7 << 19) | (0x7 << 16)); |
temp |= (intel_crtc->fdi_lanes - 1) << 19; |
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; |
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); |
POSTING_READ(reg); |
2767,6 → 2808,9 |
POSTING_READ(reg); |
udelay(200); |
/* On Haswell, the PLL configuration for ports and pipes is handled |
* separately, as part of DDI setup */ |
if (!IS_HASWELL(dev)) { |
/* Enable CPU FDI TX PLL, always on for Ironlake */ |
reg = FDI_TX_CTL(pipe); |
temp = I915_READ(reg); |
2777,6 → 2821,7 |
udelay(100); |
} |
} |
} |
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) |
{ |
2824,7 → 2869,7 |
reg = FDI_RX_CTL(pipe); |
temp = I915_READ(reg); |
temp &= ~(0x7 << 16); |
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; |
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
I915_WRITE(reg, temp & ~FDI_RX_ENABLE); |
POSTING_READ(reg); |
2853,7 → 2898,7 |
} |
/* BPC in FDI rx is consistent with that in PIPECONF */ |
temp &= ~(0x07 << 16); |
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11; |
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
I915_WRITE(reg, temp); |
POSTING_READ(reg); |
2864,12 → 2909,10 |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
unsigned long flags; |
bool pending; |
if (i915_reset_in_progress(&dev_priv->gpu_error) || |
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) |
if (atomic_read(&dev_priv->mm.wedged)) |
return false; |
spin_lock_irqsave(&dev->event_lock, flags); |
2888,8 → 2931,6 |
if (crtc->fb == NULL) |
return; |
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); |
wait_event(dev_priv->pending_flip_queue, |
!intel_crtc_has_pending_flip(crtc)); |
2933,8 → 2974,6 |
u32 divsel, phaseinc, auxdiv, phasedir = 0; |
u32 temp; |
mutex_lock(&dev_priv->dpio_lock); |
/* It is necessary to ungate the pixclk gate prior to programming |
* the divisors, and gate it back when it is done. |
*/ |
3009,8 → 3048,6 |
udelay(24); |
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE); |
mutex_unlock(&dev_priv->dpio_lock); |
} |
/* |
3091,7 → 3128,7 |
if (HAS_PCH_CPT(dev) && |
(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { |
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; |
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; |
reg = TRANS_DP_CTL(pipe); |
temp = I915_READ(reg); |
temp &= ~(TRANS_DP_PORT_SEL_MASK | |
3565,7 → 3602,7 |
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might |
* start using it. */ |
intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe; |
intel_crtc->cpu_transcoder = intel_crtc->pipe; |
intel_ddi_put_crtc_pll(crtc); |
} |
3588,30 → 3625,6 |
*/ |
} |
/** |
* g4x_fixup_plane - ugly workaround for G45 to fire up the hardware |
* cursor plane briefly if not already running after enabling the display |
* plane. |
* This workaround avoids occasional blank screens when self refresh is |
* enabled. |
*/ |
static void |
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe) |
{ |
u32 cntl = I915_READ(CURCNTR(pipe)); |
if ((cntl & CURSOR_MODE) == 0) { |
u32 fw_bcl_self = I915_READ(FW_BLC_SELF); |
I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN); |
I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX); |
intel_wait_for_vblank(dev_priv->dev, pipe); |
I915_WRITE(CURCNTR(pipe), cntl); |
I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe))); |
I915_WRITE(FW_BLC_SELF, fw_bcl_self); |
} |
} |
static void i9xx_crtc_enable(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
3630,15 → 3643,8 |
intel_update_watermarks(dev); |
intel_enable_pll(dev_priv, pipe); |
for_each_encoder_on_crtc(dev, crtc, encoder) |
if (encoder->pre_enable) |
encoder->pre_enable(encoder); |
intel_enable_pipe(dev_priv, pipe, false); |
intel_enable_plane(dev_priv, plane, pipe); |
if (IS_G4X(dev)) |
g4x_fixup_plane(dev_priv, pipe); |
intel_crtc_load_lut(crtc); |
intel_update_fbc(dev); |
3659,7 → 3665,6 |
struct intel_encoder *encoder; |
int pipe = intel_crtc->pipe; |
int plane = intel_crtc->plane; |
u32 pctl; |
if (!intel_crtc->active) |
3679,13 → 3684,6 |
intel_disable_plane(dev_priv, plane, pipe); |
intel_disable_pipe(dev_priv, pipe); |
/* Disable panel fitter if it is on this pipe. */ |
pctl = I915_READ(PFIT_CONTROL); |
if ((pctl & PFIT_ENABLE) && |
((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe) |
I915_WRITE(PFIT_CONTROL, 0); |
intel_disable_pll(dev_priv, pipe); |
intel_crtc->active = false; |
3752,17 → 3750,19 |
intel_crtc_update_sarea(crtc, enable); |
} |
static void intel_crtc_noop(struct drm_crtc *crtc) |
{ |
} |
static void intel_crtc_disable(struct drm_crtc *crtc) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_connector *connector; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
/* crtc should still be enabled when we disable it. */ |
WARN_ON(!crtc->enabled); |
intel_crtc->eld_vld = false; |
dev_priv->display.crtc_disable(crtc); |
intel_crtc_update_sarea(crtc, false); |
dev_priv->display.off(crtc); |
3800,6 → 3800,10 |
} |
} |
void intel_encoder_noop(struct drm_encoder *encoder) |
{ |
} |
void intel_encoder_destroy(struct drm_encoder *encoder) |
{ |
struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
3991,8 → 3995,16 |
return 133000; |
} |
struct fdi_m_n { |
u32 tu; |
u32 gmch_m; |
u32 gmch_n; |
u32 link_m; |
u32 link_n; |
}; |
static void |
intel_reduce_ratio(uint32_t *num, uint32_t *den) |
fdi_reduce_ratio(u32 *num, u32 *den) |
{ |
while (*num > 0xffffff || *den > 0xffffff) { |
*num >>= 1; |
4000,18 → 4012,20 |
} |
} |
void |
intel_link_compute_m_n(int bits_per_pixel, int nlanes, |
int pixel_clock, int link_clock, |
struct intel_link_m_n *m_n) |
static void |
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, |
int link_clock, struct fdi_m_n *m_n) |
{ |
m_n->tu = 64; |
m_n->tu = 64; /* default size */ |
/* BUG_ON(pixel_clock > INT_MAX / 36); */ |
m_n->gmch_m = bits_per_pixel * pixel_clock; |
m_n->gmch_n = link_clock * nlanes * 8; |
intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
m_n->link_m = pixel_clock; |
m_n->link_n = link_clock; |
intel_reduce_ratio(&m_n->link_m, &m_n->link_n); |
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); |
} |
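A worked example with representative numbers (chosen for illustration, not read from hardware): a 24 bpp stream at a 148,500 kHz pixel clock over four lanes of a 270,000 kHz link gives gmch_m = 24 * 148500 = 3,564,000 and gmch_n = 270000 * 4 * 8 = 8,640,000. Both already fit in the 24-bit (0xffffff) register fields, so fdi_reduce_ratio leaves them untouched; had either exceeded that, the loop would halve numerator and denominator together, preserving the 0.4125 data ratio at reduced precision.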
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
4258,6 → 4272,51 |
} |
} |
static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock, |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int pipe = intel_crtc->pipe; |
u32 temp; |
temp = I915_READ(LVDS); |
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
if (pipe == 1) { |
temp |= LVDS_PIPEB_SELECT; |
} else { |
temp &= ~LVDS_PIPEB_SELECT; |
} |
/* set the corresponding LVDS_BORDER bit */ |
temp |= dev_priv->lvds_border_bits; |
/* Set the B0-B3 data pairs corresponding to whether we're going to |
* set the DPLLs for dual-channel mode or not. |
*/ |
if (clock->p2 == 7) |
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
else |
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
* appropriately here, but we need to look more thoroughly into how |
* panels behave in the two modes. |
*/ |
/* set the dithering flag on LVDS as needed */ |
if (INTEL_INFO(dev)->gen >= 4) { |
if (dev_priv->lvds_dither) |
temp |= LVDS_ENABLE_DITHER; |
else |
temp &= ~LVDS_ENABLE_DITHER; |
} |
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
temp |= LVDS_HSYNC_POLARITY; |
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
temp |= LVDS_VSYNC_POLARITY; |
I915_WRITE(LVDS, temp); |
} |
static void vlv_update_pll(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
4273,8 → 4332,6 |
bool is_sdvo; |
u32 temp; |
mutex_lock(&dev_priv->dpio_lock); |
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) || |
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI); |
4358,8 → 4415,6 |
temp |= (1 << 21); |
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp); |
} |
mutex_unlock(&dev_priv->dpio_lock); |
} |
static void i9xx_update_pll(struct drm_crtc *crtc, |
4371,7 → 4426,6 |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *encoder; |
int pipe = intel_crtc->pipe; |
u32 dpll; |
bool is_sdvo; |
4440,9 → 4494,12 |
POSTING_READ(DPLL(pipe)); |
udelay(150); |
for_each_encoder_on_crtc(dev, crtc, encoder) |
if (encoder->pre_pll_enable) |
encoder->pre_pll_enable(encoder); |
/* The LVDS pin pair needs to be on before the DPLLs are enabled. |
* This is an exception to the general rule that mode_set doesn't turn |
* things on. |
*/ |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
intel_update_lvds(crtc, clock, adjusted_mode); |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) |
intel_dp_set_m_n(crtc, mode, adjusted_mode); |
4481,7 → 4538,6 |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_encoder *encoder; |
int pipe = intel_crtc->pipe; |
u32 dpll; |
4515,9 → 4571,12 |
POSTING_READ(DPLL(pipe)); |
udelay(150); |
for_each_encoder_on_crtc(dev, crtc, encoder) |
if (encoder->pre_pll_enable) |
encoder->pre_pll_enable(encoder); |
/* The LVDS pin pair needs to be on before the DPLLs are enabled. |
* This is an exception to the general rule that mode_set doesn't turn |
* things on. |
*/ |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
intel_update_lvds(crtc, clock, adjusted_mode); |
I915_WRITE(DPLL(pipe), dpll); |
4707,10 → 4766,10 |
} |
/* default to 8bpc */ |
pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN); |
pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); |
if (is_dp) { |
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
pipeconf |= PIPECONF_6BPC | |
pipeconf |= PIPECONF_BPP_6 | |
PIPECONF_DITHER_EN | |
PIPECONF_DITHER_TYPE_SP; |
} |
4718,7 → 4777,7 |
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) { |
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
pipeconf |= PIPECONF_6BPC | |
pipeconf |= PIPECONF_BPP_6 | |
PIPECONF_ENABLE | |
I965_PIPECONF_ACTIVE; |
} |
4905,8 → 4964,6 |
if (!has_vga) |
return; |
mutex_lock(&dev_priv->dpio_lock); |
/* XXX: Rip out SDV support once Haswell ships for real. */ |
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00) |
is_sdv = true; |
5049,8 → 5106,6 |
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK); |
tmp |= SBI_DBUFF0_ENABLE; |
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK); |
mutex_unlock(&dev_priv->dpio_lock); |
} |
/* |
5105,19 → 5160,19 |
val = I915_READ(PIPECONF(pipe)); |
val &= ~PIPECONF_BPC_MASK; |
val &= ~PIPE_BPC_MASK; |
switch (intel_crtc->bpp) { |
case 18: |
val |= PIPECONF_6BPC; |
val |= PIPE_6BPC; |
break; |
case 24: |
val |= PIPECONF_8BPC; |
val |= PIPE_8BPC; |
break; |
case 30: |
val |= PIPECONF_10BPC; |
val |= PIPE_10BPC; |
break; |
case 36: |
val |= PIPECONF_12BPC; |
val |= PIPE_12BPC; |
break; |
default: |
/* Case prevented by intel_choose_pipe_bpp_dither. */ |
5134,80 → 5189,10 |
else |
val |= PIPECONF_PROGRESSIVE; |
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) |
val |= PIPECONF_COLOR_RANGE_SELECT; |
else |
val &= ~PIPECONF_COLOR_RANGE_SELECT; |
I915_WRITE(PIPECONF(pipe), val); |
POSTING_READ(PIPECONF(pipe)); |
} |
/* |
* Set up the pipe CSC unit. |
* |
* Currently only full range RGB to limited range RGB conversion |
* is supported, but eventually this should handle various |
* RGB<->YCbCr scenarios as well. |
*/ |
static void intel_set_pipe_csc(struct drm_crtc *crtc, |
const struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int pipe = intel_crtc->pipe; |
uint16_t coeff = 0x7800; /* 1.0 */ |
/* |
* TODO: Check what kind of values actually come out of the pipe |
* with these coeff/postoff values and adjust to get the best |
* accuracy. Perhaps we even need to take the bpc value into |
* consideration. |
*/ |
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) |
coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */ |
/* |
* GY/GU and RY/RU should be the other way around according |
* to BSpec, but reality doesn't agree. Just set them up in |
* a way that results in the correct picture. |
*/ |
I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16); |
I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0); |
I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff); |
I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0); |
I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0); |
I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16); |
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0); |
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0); |
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0); |
if (INTEL_INFO(dev)->gen > 6) { |
uint16_t postoff = 0; |
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) |
postoff = (16 * (1 << 13) / 255) & 0x1fff; |
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff); |
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff); |
I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff); |
I915_WRITE(PIPE_CSC_MODE(pipe), 0); |
} else { |
uint32_t mode = CSC_MODE_YUV_TO_RGB; |
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) |
mode |= CSC_BLACK_SCREEN_OFFSET; |
I915_WRITE(PIPE_CSC_MODE(pipe), mode); |
} |
} |
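The limited-range arithmetic in the CSC helper above is worth spelling out: the scale factor is (235 - 16) / 255 = 219 / 255 ≈ 0.8588, computed as 219 * 4096 / 255 = 3517 in 1.12 fixed point and masked with 0xff8 (giving 0xdb8) because the coefficient register ignores the low three bits; the post-offset is 16 * 8192 / 255 = 514, the 16 / 255 ≈ 0.0627 black-level lift expressed in 1.13 fixed point. That 0x7800 encodes 1.0 is taken from the comment; the exact hardware coefficient format is not reproduced here.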
static void haswell_set_pipeconf(struct drm_crtc *crtc, |
struct drm_display_mode *adjusted_mode, |
bool dither) |
5398,7 → 5383,7 |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
struct intel_encoder *intel_encoder, *edp_encoder = NULL; |
struct intel_link_m_n m_n = {0}; |
struct fdi_m_n m_n = {0}; |
int target_clock, pixel_multiplier, lane, link_bw; |
bool is_dp = false, is_cpu_edp = false; |
5450,7 → 5435,8 |
if (pixel_multiplier > 1) |
link_bw *= pixel_multiplier; |
intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n); |
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
&m_n); |
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m); |
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n); |
5503,7 → 5489,7 |
if (is_lvds) { |
if ((intel_panel_use_ssc(dev_priv) && |
dev_priv->lvds_ssc_freq == 100) || |
intel_is_dual_link_lvds(dev)) |
(I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) |
factor = 25; |
} else if (is_sdvo && is_tv) |
factor = 20; |
5578,6 → 5564,7 |
bool ok, has_reduced_clock = false; |
bool is_lvds = false, is_dp = false, is_cpu_edp = false; |
struct intel_encoder *encoder; |
u32 temp; |
int ret; |
bool dither, fdi_config_ok; |
5641,13 → 5628,55 |
} else |
intel_put_pch_pll(intel_crtc); |
if (is_dp && !is_cpu_edp) |
/* The LVDS pin pair needs to be on before the DPLLs are enabled. |
* This is an exception to the general rule that mode_set doesn't turn |
* things on. |
*/ |
if (is_lvds) { |
temp = I915_READ(PCH_LVDS); |
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
if (HAS_PCH_CPT(dev)) { |
temp &= ~PORT_TRANS_SEL_MASK; |
temp |= PORT_TRANS_SEL_CPT(pipe); |
} else { |
if (pipe == 1) |
temp |= LVDS_PIPEB_SELECT; |
else |
temp &= ~LVDS_PIPEB_SELECT; |
} |
/* set the corresponding LVDS_BORDER bit */ |
temp |= dev_priv->lvds_border_bits; |
/* Set the B0-B3 data pairs corresponding to whether we're going to |
* set the DPLLs for dual-channel mode or not. |
*/ |
if (clock.p2 == 7) |
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
else |
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
* appropriately here, but we need to look more thoroughly into how |
* panels behave in the two modes. |
*/ |
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
temp |= LVDS_HSYNC_POLARITY; |
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
temp |= LVDS_VSYNC_POLARITY; |
I915_WRITE(PCH_LVDS, temp); |
} |
if (is_dp && !is_cpu_edp) { |
intel_dp_set_m_n(crtc, mode, adjusted_mode); |
} else { |
/* For non-DP output, clear any trans DP clock recovery setting.*/ |
I915_WRITE(TRANSDATA_M1(pipe), 0); |
I915_WRITE(TRANSDATA_N1(pipe), 0); |
I915_WRITE(TRANSDPLINK_M1(pipe), 0); |
I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
} |
for_each_encoder_on_crtc(dev, crtc, encoder) |
if (encoder->pre_pll_enable) |
encoder->pre_pll_enable(encoder); |
if (intel_crtc->pch_pll) { |
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
5681,6 → 5710,9 |
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc); |
if (is_cpu_edp) |
ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
ironlake_set_pipeconf(crtc, adjusted_mode, dither); |
intel_wait_for_vblank(dev, pipe); |
5698,35 → 5730,6 |
return fdi_config_ok ? ret : -EINVAL; |
} |
static void haswell_modeset_global_resources(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
bool enable = false; |
struct intel_crtc *crtc; |
struct intel_encoder *encoder; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) { |
if (crtc->pipe != PIPE_A && crtc->base.enabled) |
enable = true; |
/* XXX: Should check for edp transcoder here, but thanks to init |
* sequence that's not yet available. Just in case desktop eDP |
* on PORT D is possible on haswell, too. */ |
} |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
base.head) { |
if (encoder->type != INTEL_OUTPUT_EDP && |
encoder->connectors_active) |
enable = true; |
} |
/* Even the eDP panel fitter is outside the always-on well. */ |
if (dev_priv->pch_pf_size) |
enable = true; |
intel_set_power_well(dev, enable); |
} |
static int haswell_crtc_mode_set(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode, |
5739,13 → 5742,20 |
int pipe = intel_crtc->pipe; |
int plane = intel_crtc->plane; |
int num_connectors = 0; |
bool is_dp = false, is_cpu_edp = false; |
intel_clock_t clock, reduced_clock; |
u32 dpll = 0, fp = 0, fp2 = 0; |
bool ok, has_reduced_clock = false; |
bool is_lvds = false, is_dp = false, is_cpu_edp = false; |
struct intel_encoder *encoder; |
u32 temp; |
int ret; |
bool dither; |
for_each_encoder_on_crtc(dev, crtc, encoder) { |
switch (encoder->type) { |
case INTEL_OUTPUT_LVDS: |
is_lvds = true; |
break; |
case INTEL_OUTPUT_DISPLAYPORT: |
is_dp = true; |
break; |
5779,6 → 5789,16 |
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock)) |
return -EINVAL; |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock, |
&has_reduced_clock, |
&reduced_clock); |
if (!ok) { |
DRM_ERROR("Couldn't find PLL settings for mode!\n"); |
return -EINVAL; |
} |
} |
/* Ensure that the cursor is valid for the new mode before changing... */ |
// intel_crtc_update_cursor(crtc, true); |
5785,26 → 5805,131 |
/* determine panel color depth */ |
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp, |
adjusted_mode); |
if (is_lvds && dev_priv->lvds_dither) |
dither = true; |
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
drm_mode_debug_printmodeline(mode); |
if (is_dp && !is_cpu_edp) |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
if (has_reduced_clock) |
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
reduced_clock.m2; |
dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, |
fp); |
/* CPU eDP is the only output that doesn't need a PCH PLL of its |
* own on pre-Haswell/LPT generation */ |
if (!is_cpu_edp) { |
struct intel_pch_pll *pll; |
pll = intel_get_pch_pll(intel_crtc, dpll, fp); |
if (pll == NULL) { |
DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n", |
pipe); |
return -EINVAL; |
} |
} else |
intel_put_pch_pll(intel_crtc); |
/* The LVDS pin pair needs to be on before the DPLLs are |
* enabled. This is an exception to the general rule that |
* mode_set doesn't turn things on. |
*/ |
if (is_lvds) { |
temp = I915_READ(PCH_LVDS); |
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
if (HAS_PCH_CPT(dev)) { |
temp &= ~PORT_TRANS_SEL_MASK; |
temp |= PORT_TRANS_SEL_CPT(pipe); |
} else { |
if (pipe == 1) |
temp |= LVDS_PIPEB_SELECT; |
else |
temp &= ~LVDS_PIPEB_SELECT; |
} |
/* set the corresponding LVDS_BORDER bit */ |
temp |= dev_priv->lvds_border_bits; |
/* Set the B0-B3 data pairs corresponding to whether |
* we're going to set the DPLLs for dual-channel mode or |
* not. |
*/ |
if (clock.p2 == 7) |
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
else |
temp &= ~(LVDS_B0B3_POWER_UP | |
LVDS_CLKB_POWER_UP); |
/* It would be nice to set 24 vs 18-bit mode |
* (LVDS_A3_POWER_UP) appropriately here, but we need to |
* look more thoroughly into how panels behave in the |
* two modes. |
*/ |
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
temp |= LVDS_HSYNC_POLARITY; |
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) |
temp |= LVDS_VSYNC_POLARITY; |
I915_WRITE(PCH_LVDS, temp); |
} |
} |
if (is_dp && !is_cpu_edp) { |
intel_dp_set_m_n(crtc, mode, adjusted_mode); |
} else { |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
/* For non-DP output, clear any trans DP clock recovery |
* setting.*/ |
I915_WRITE(TRANSDATA_M1(pipe), 0); |
I915_WRITE(TRANSDATA_N1(pipe), 0); |
I915_WRITE(TRANSDPLINK_M1(pipe), 0); |
I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
} |
} |
intel_crtc->lowfreq_avail = false; |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) { |
if (intel_crtc->pch_pll) { |
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
/* Wait for the clocks to stabilize. */ |
POSTING_READ(intel_crtc->pch_pll->pll_reg); |
udelay(150); |
/* The pixel multiplier can only be updated once the |
* DPLL is enabled and the clocks are stable. |
* |
* So write it again. |
*/ |
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll); |
} |
if (intel_crtc->pch_pll) { |
if (is_lvds && has_reduced_clock && i915_powersave) { |
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2); |
intel_crtc->lowfreq_avail = true; |
} else { |
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp); |
} |
} |
} |
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode); |
if (!is_dp || is_cpu_edp) |
ironlake_set_m_n(crtc, mode, adjusted_mode); |
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) |
if (is_cpu_edp) |
ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
haswell_set_pipeconf(crtc, adjusted_mode, dither); |
intel_set_pipe_csc(crtc, adjusted_mode); |
/* Set up the display plane register */ |
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE); |
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE); |
POSTING_READ(DSPCNTR(plane)); |
ret = intel_pipe_set_base(crtc, x, y, fb); |
5926,7 → 6051,6 |
struct drm_i915_private *dev_priv = connector->dev->dev_private; |
uint8_t *eld = connector->eld; |
struct drm_device *dev = crtc->dev; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
uint32_t eldv; |
uint32_t i; |
int len; |
5968,7 → 6092,6 |
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe)); |
eldv = AUDIO_ELD_VALID_A << (pipe * 4); |
intel_crtc->eld_vld = true; |
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); |
6205,8 → 6328,6 |
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); |
cntl |= CURSOR_MODE_DISABLE; |
} |
if (IS_HASWELL(dev)) |
cntl |= CURSOR_PIPE_CSC_ENABLE; |
I915_WRITE(CURCNTR_IVB(pipe), cntl); |
intel_crtc->cursor_visible = visible; |
6564,8 → 6685,6 |
if (encoder->crtc) { |
crtc = encoder->crtc; |
mutex_lock(&crtc->mutex); |
old->dpms_mode = connector->dpms; |
old->load_detect_temp = false; |
6595,7 → 6714,6 |
return false; |
} |
mutex_lock(&crtc->mutex); |
intel_encoder->new_crtc = to_intel_crtc(crtc); |
to_intel_connector(connector)->new_encoder = intel_encoder; |
6623,15 → 6741,13 |
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); |
if (IS_ERR(fb)) { |
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); |
mutex_unlock(&crtc->mutex); |
return false; |
} |
if (intel_set_mode(crtc, mode, 0, 0, fb)) { |
if (!intel_set_mode(crtc, mode, 0, 0, fb)) { |
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); |
if (old->release_fb) |
old->release_fb->funcs->destroy(old->release_fb); |
mutex_unlock(&crtc->mutex); |
return false; |
} |
6646,7 → 6762,6 |
struct intel_encoder *intel_encoder = |
intel_attached_encoder(connector); |
struct drm_encoder *encoder = &intel_encoder->base; |
struct drm_crtc *crtc = encoder->crtc; |
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
connector->base.id, drm_get_connector_name(connector), |
6653,16 → 6768,15 |
encoder->base.id, drm_get_encoder_name(encoder)); |
if (old->load_detect_temp) { |
struct drm_crtc *crtc = encoder->crtc; |
to_intel_connector(connector)->new_encoder = NULL; |
intel_encoder->new_crtc = NULL; |
intel_set_mode(crtc, NULL, 0, 0, NULL); |
if (old->release_fb) { |
drm_framebuffer_unregister_private(old->release_fb); |
drm_framebuffer_unreference(old->release_fb); |
} |
if (old->release_fb) |
old->release_fb->funcs->destroy(old->release_fb); |
mutex_unlock(&crtc->mutex); |
return; |
} |
6669,8 → 6783,6 |
/* Switch crtc and encoder back off if necessary */ |
if (old->dpms_mode != DRM_MODE_DPMS_ON) |
connector->funcs->dpms(connector, old->dpms_mode); |
mutex_unlock(&crtc->mutex); |
} |
/* Returns the clock of the currently programmed mode of the given pipe. */ |
6866,8 → 6978,15 |
void intel_mark_idle(struct drm_device *dev) |
{ |
} |
void intel_mark_fb_busy(struct drm_i915_gem_object *obj) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_crtc *crtc; |
ENTER(); |
if (!i915_powersave) |
return; |
6875,11 → 6994,12 |
if (!crtc->fb) |
continue; |
intel_decrease_pllclock(crtc); |
if (to_intel_framebuffer(crtc->fb)->obj == obj) |
intel_increase_pllclock(crtc); |
} |
} |
void intel_mark_fb_busy(struct drm_i915_gem_object *obj) |
void intel_mark_fb_idle(struct drm_i915_gem_object *obj) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_crtc *crtc; |
6892,7 → 7012,7 |
continue; |
if (to_intel_framebuffer(crtc->fb)->obj == obj) |
intel_increase_pllclock(crtc); |
intel_decrease_pllclock(crtc); |
} |
} |
6977,7 → 7097,9 |
obj = work->old_fb_obj; |
wake_up_all(&dev_priv->pending_flip_queue); |
atomic_clear_mask(1 << intel_crtc->plane, |
&obj->pending_flip.counter); |
wake_up(&dev_priv->pending_flip_queue); |
queue_work(dev_priv->wq, &work->work); |
7273,8 → 7395,8 |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_framebuffer *old_fb = crtc->fb; |
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj; |
struct intel_framebuffer *intel_fb; |
struct drm_i915_gem_object *obj; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
struct intel_unpin_work *work; |
unsigned long flags; |
7299,7 → 7421,8 |
work->event = event; |
work->crtc = crtc; |
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj; |
intel_fb = to_intel_framebuffer(crtc->fb); |
work->old_fb_obj = intel_fb->obj; |
INIT_WORK(&work->work, intel_unpin_work_fn); |
ret = drm_vblank_get(dev, intel_crtc->pipe); |
7319,6 → 7442,9 |
intel_crtc->unpin_work = work; |
spin_unlock_irqrestore(&dev->event_lock, flags); |
intel_fb = to_intel_framebuffer(fb); |
obj = intel_fb->obj; |
if (atomic_read(&intel_crtc->unpin_work_count) >= 2) |
flush_workqueue(dev_priv->wq); |
7336,8 → 7462,11 |
work->enable_stall_check = true; |
/* Block clients from rendering to the new back buffer until |
* the flip occurs and the object is no longer visible. |
*/ |
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); |
atomic_inc(&intel_crtc->unpin_work_count); |
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); |
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); |
if (ret) |
7353,7 → 7482,7 |
cleanup_pending: |
atomic_dec(&intel_crtc->unpin_work_count); |
crtc->fb = old_fb; |
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); |
drm_gem_object_unreference(&work->old_fb_obj->base); |
drm_gem_object_unreference(&obj->base); |
mutex_unlock(&dev->struct_mutex); |
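The flip throttling here keeps one bit per display plane in the old framebuffer object's pending_flip mask: queuing a flip sets the plane's bit (and the error path above subtracts it back out), the finish-page-flip path clears it and wakes pending_flip_queue, and waiters sleep until the object's mask reads zero. A reduced sketch of that accounting; the helper names are illustrative, not driver entry points:

#include <linux/atomic.h>
#include <linux/wait.h>

/* One pending-flip bit per plane, mirroring the scheme above. */
static void flip_queued(atomic_t *pending_flip, int plane)
{
	atomic_add(1 << plane, pending_flip);	/* flip now in flight */
}

static void flip_finished(atomic_t *pending_flip, int plane,
			  wait_queue_head_t *waiters)
{
	atomic_sub(1 << plane, pending_flip);	/* flip landed */
	wake_up_all(waiters);			/* sleepers re-check the mask */
}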
7375,6 → 7504,7 |
static struct drm_crtc_helper_funcs intel_helper_funcs = { |
.mode_set_base_atomic = intel_pipe_set_base_atomic, |
.load_lut = intel_crtc_load_lut, |
.disable = intel_crtc_noop, |
}; |
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder) |
7764,22 → 7894,17 |
} |
} |
int intel_set_mode(struct drm_crtc *crtc, |
bool intel_set_mode(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
int x, int y, struct drm_framebuffer *fb) |
{ |
struct drm_device *dev = crtc->dev; |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode; |
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode; |
struct intel_crtc *intel_crtc; |
unsigned disable_pipes, prepare_pipes, modeset_pipes; |
int ret = 0; |
bool ret = true; |
saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL); |
if (!saved_mode) |
return -ENOMEM; |
saved_hwmode = saved_mode + 1; |
intel_modeset_affected_pipes(crtc, &modeset_pipes, |
&prepare_pipes, &disable_pipes); |
7789,8 → 7914,8 |
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) |
intel_crtc_disable(&intel_crtc->base); |
*saved_hwmode = crtc->hwmode; |
*saved_mode = crtc->mode; |
saved_hwmode = crtc->hwmode; |
saved_mode = crtc->mode; |
/* Hack: Because we don't (yet) support global modeset on multiple |
* crtcs, we don't keep track of the new mode for more than one crtc. |
7801,8 → 7926,7 |
if (modeset_pipes) { |
adjusted_mode = intel_modeset_adjusted_mode(crtc, mode); |
if (IS_ERR(adjusted_mode)) { |
ret = PTR_ERR(adjusted_mode); |
goto out; |
return false; |
} |
} |
7828,10 → 7952,10 |
* on the DPLL. |
*/ |
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) { |
ret = intel_crtc_mode_set(&intel_crtc->base, |
ret = !intel_crtc_mode_set(&intel_crtc->base, |
mode, adjusted_mode, |
x, y, fb); |
if (ret) |
if (!ret) |
goto done; |
} |
7853,23 → 7977,16 |
/* FIXME: add subpixel order */ |
done: |
drm_mode_destroy(dev, adjusted_mode); |
if (ret && crtc->enabled) { |
crtc->hwmode = *saved_hwmode; |
crtc->mode = *saved_mode; |
if (!ret && crtc->enabled) { |
crtc->hwmode = saved_hwmode; |
crtc->mode = saved_mode; |
} else { |
intel_modeset_check_state(dev); |
} |
out: |
kfree(saved_mode); |
return ret; |
} |
void intel_crtc_restore_mode(struct drm_crtc *crtc) |
{ |
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb); |
} |
#undef for_each_intel_crtc_masked |
static void intel_set_config_free(struct intel_set_config *config) |
7982,7 → 8099,7 |
struct intel_encoder *encoder; |
int count, ro; |
/* The upper layers ensure that we either disable a crtc or have a list |
/* The upper layers ensure that we either disable a crtc or have a list |
* of connectors. For paranoia, double-check this. */ |
WARN_ON(!set->fb && (set->num_connectors != 0)); |
WARN_ON(set->fb && (set->num_connectors == 0)); |
8084,10 → 8201,15 |
BUG_ON(!set->crtc); |
BUG_ON(!set->crtc->helper_private); |
/* Enforce sane interface api - has been abused by the fb helper. */ |
BUG_ON(!set->mode && set->fb); |
BUG_ON(set->fb && set->num_connectors == 0); |
if (!set->mode) |
set->fb = NULL; |
/* The fb helper likes to play gross jokes with ->mode_set_config. |
* Unfortunately the crtc helper doesn't do much at all for this case, |
* so we have to cope with this madness until the fb helper is fixed up. */ |
if (set->fb && set->num_connectors == 0) |
return 0; |
if (set->fb) { |
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n", |
set->crtc->base.id, set->fb->base.id, |
8130,11 → 8252,11 |
drm_mode_debug_printmodeline(set->mode); |
} |
ret = intel_set_mode(set->crtc, set->mode, |
set->x, set->y, set->fb); |
if (ret) { |
DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n", |
set->crtc->base.id, ret); |
if (!intel_set_mode(set->crtc, set->mode, |
set->x, set->y, set->fb)) { |
DRM_ERROR("failed to set mode on [CRTC:%d]\n", |
set->crtc->base.id); |
ret = -EINVAL; |
goto fail; |
} |
} else if (config->fb_changed) { |
8151,7 → 8273,7 |
/* Try to restore the config */ |
if (config->mode_changed && |
intel_set_mode(save_set.crtc, save_set.mode, |
!intel_set_mode(save_set.crtc, save_set.mode, |
save_set.x, save_set.y, save_set.fb)) |
DRM_ERROR("failed to restore config after modeset failure\n"); |
8171,7 → 8293,7 |
static void intel_cpu_pll_init(struct drm_device *dev) |
{ |
if (HAS_DDI(dev)) |
if (IS_HASWELL(dev)) |
intel_ddi_pll_init(dev); |
} |
8304,10 → 8426,11 |
I915_WRITE(PFIT_CONTROL, 0); |
} |
if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) |
if (!(IS_HASWELL(dev) && |
(I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES))) |
intel_crt_init(dev); |
if (HAS_DDI(dev)) { |
if (IS_HASWELL(dev)) { |
int found; |
/* Haswell uses DDI functions to detect digital outputs */ |
8354,18 → 8477,23 |
if (I915_READ(PCH_DP_D) & DP_DETECTED) |
intel_dp_init(dev, PCH_DP_D, PORT_D); |
} else if (IS_VALLEYVIEW(dev)) { |
int found; |
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */ |
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) |
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); |
if (I915_READ(DP_C) & DP_DETECTED) |
intel_dp_init(dev, DP_C, PORT_C); |
if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) { |
intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B); |
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) |
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); |
if (I915_READ(SDVOB) & PORT_DETECTED) { |
/* SDVOB is multiplexed with HDMIB */ |
found = intel_sdvo_init(dev, SDVOB, true); |
if (!found) |
intel_hdmi_init(dev, SDVOB, PORT_B); |
if (!found && (I915_READ(DP_B) & DP_DETECTED)) |
intel_dp_init(dev, DP_B, PORT_B); |
} |
if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED) |
intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C); |
if (I915_READ(SDVOC) & PORT_DETECTED) |
intel_hdmi_init(dev, SDVOC, PORT_C); |
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
bool found = false; |
8507,9 → 8635,6 |
if (mode_cmd->offsets[0] != 0) |
return -EINVAL; |
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); |
intel_fb->obj = obj; |
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
if (ret) { |
DRM_ERROR("framebuffer init failed %d\n", ret); |
8516,6 → 8641,8 |
return ret; |
} |
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); |
intel_fb->obj = obj; |
return 0; |
} |
8522,7 → 8649,7 |
static const struct drm_mode_config_funcs intel_mode_funcs = { |
.fb_create = NULL /*intel_user_framebuffer_create*/, |
.output_poll_changed = intel_fb_output_poll_changed, |
.output_poll_changed = NULL /*intel_fb_output_poll_changed*/, |
}; |
/* Set up chip specific display functions */ |
8531,7 → 8658,7 |
struct drm_i915_private *dev_priv = dev->dev_private; |
/* We always want a DPMS function */ |
if (HAS_DDI(dev)) { |
if (IS_HASWELL(dev)) { |
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; |
dev_priv->display.crtc_enable = haswell_crtc_enable; |
dev_priv->display.crtc_disable = haswell_crtc_disable; |
8593,9 → 8720,8 |
} else if (IS_HASWELL(dev)) { |
dev_priv->display.fdi_link_train = hsw_fdi_link_train; |
dev_priv->display.write_eld = haswell_write_eld; |
dev_priv->display.modeset_global_resources = |
haswell_modeset_global_resources; |
} |
} else |
dev_priv->display.update_wm = NULL; |
} else if (IS_G4X(dev)) { |
dev_priv->display.write_eld = g4x_write_eld; |
} |
8699,18 → 8825,6 |
/* Acer Aspire 5734Z must invert backlight brightness */ |
{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness }, |
/* Acer/eMachines G725 */ |
{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness }, |
/* Acer/eMachines e725 */ |
{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness }, |
/* Acer/Packard Bell NCL20 */ |
{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness }, |
/* Acer Aspire 4736Z */ |
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness }, |
}; |
static void intel_init_quirks(struct drm_device *dev) |
8739,8 → 8853,13 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
u8 sr1; |
u32 vga_reg = i915_vgacntrl_reg(dev); |
u32 vga_reg; |
if (HAS_PCH_SPLIT(dev)) |
vga_reg = CPU_VGACNTRL; |
else |
vga_reg = VGACNTRL; |
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); |
out8(SR01, VGA_SR_INDEX); |
sr1 = in8(VGA_SR_DATA); |
8754,7 → 8873,10 |
void intel_modeset_init_hw(struct drm_device *dev) |
{ |
intel_init_power_well(dev); |
/* We attempt to init the necessary power wells early in initialization, |
* so the subsystems that expect power to be enabled can work. |
*/ |
intel_init_power_wells(dev); |
intel_prepare_ddi(dev); |
8796,7 → 8918,7 |
dev->mode_config.max_width = 8192; |
dev->mode_config.max_height = 8192; |
} |
dev->mode_config.fb_base = dev_priv->gtt.mappable_base; |
dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr; |
DRM_DEBUG_KMS("%d display pipe%s available.\n", |
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
8814,9 → 8936,6 |
/* Just disable it once at startup */ |
i915_disable_vga(dev); |
intel_setup_outputs(dev); |
/* Just in case the BIOS is doing something questionable. */ |
intel_disable_fbc(dev); |
} |
static void |
9010,7 → 9129,7 |
struct intel_encoder *encoder; |
struct intel_connector *connector; |
if (HAS_DDI(dev)) { |
if (IS_HASWELL(dev)) { |
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)); |
if (tmp & TRANS_DDI_FUNC_ENABLE) { |
9051,7 → 9170,7 |
crtc->active ? "enabled" : "disabled"); |
} |
if (HAS_DDI(dev)) |
if (IS_HASWELL(dev)) |
intel_ddi_setup_hw_pll_state(dev); |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
9102,7 → 9221,9 |
if (force_restore) { |
for_each_pipe(pipe) { |
intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]); |
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); |
intel_set_mode(&crtc->base, &crtc->base.mode, |
crtc->base.x, crtc->base.y, crtc->base.fb); |
} |
// i915_redisable_vga(dev); |
/drivers/video/drm/i915/i915_drv.c |
---|
52,30 → 52,26 |
struct drm_file *drm_file_handlers[256]; |
static int i915_modeset __read_mostly = 1; |
module_param_named(modeset, i915_modeset, int, 0400); |
MODULE_PARM_DESC(modeset, |
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " |
"1=on, -1=force vga console preference [default])"); |
int i915_panel_ignore_lid __read_mostly = 1; |
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); |
int i915_panel_ignore_lid __read_mostly = 0; |
MODULE_PARM_DESC(panel_ignore_lid, |
"Override lid status (0=autodetect, 1=autodetect disabled [default], " |
"-1=force lid closed, -2=force lid open)"); |
"Override lid status (0=autodetect [default], 1=lid open, " |
"-1=lid closed)"); |
unsigned int i915_powersave __read_mostly = 0; |
module_param_named(powersave, i915_powersave, int, 0600); |
MODULE_PARM_DESC(powersave, |
"Enable powersavings, fbc, downclocking, etc. (default: true)"); |
int i915_semaphores __read_mostly = -1; |
module_param_named(semaphores, i915_semaphores, int, 0600); |
MODULE_PARM_DESC(semaphores, |
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); |
int i915_enable_rc6 __read_mostly = 0; |
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400); |
MODULE_PARM_DESC(i915_enable_rc6, |
"Enable power-saving render C-state 6. " |
"Different stages can be selected via bitmask values " |
84,41 → 80,34 |
"default: -1 (use per-chip default)"); |
int i915_enable_fbc __read_mostly = 0; |
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); |
MODULE_PARM_DESC(i915_enable_fbc, |
"Enable frame buffer compression for power savings " |
"(default: -1 (use per-chip default))"); |
unsigned int i915_lvds_downclock __read_mostly = 0; |
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
MODULE_PARM_DESC(lvds_downclock, |
"Use panel (LVDS/eDP) downclocking for power savings " |
"(default: false)"); |
int i915_lvds_channel_mode __read_mostly; |
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600); |
MODULE_PARM_DESC(lvds_channel_mode, |
"Specify LVDS channel mode " |
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)"); |
int i915_panel_use_ssc __read_mostly = -1; |
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); |
MODULE_PARM_DESC(lvds_use_ssc, |
"Use Spread Spectrum Clock with panels [LVDS/eDP] " |
"(default: auto from VBT)"); |
int i915_vbt_sdvo_panel_type __read_mostly = -1; |
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); |
MODULE_PARM_DESC(vbt_sdvo_panel_type, |
"Override/Ignore selection of SDVO panel mode in the VBT " |
"(-2=ignore, -1=auto [default], index in VBT BIOS table)"); |
static bool i915_try_reset __read_mostly = true; |
module_param_named(reset, i915_try_reset, bool, 0600); |
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); |
bool i915_enable_hangcheck __read_mostly = false; |
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); |
MODULE_PARM_DESC(enable_hangcheck, |
"Periodically check GPU activity for detecting hangs. " |
"WARNING: Disabling this can cause system wide hangs. " |
125,12 → 114,10 |
"(default: true)"); |
int i915_enable_ppgtt __read_mostly = false; |
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); |
MODULE_PARM_DESC(i915_enable_ppgtt, |
"Enable PPGTT (default: true)"); |
unsigned int i915_preliminary_hw_support __read_mostly = true; |
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600); |
MODULE_PARM_DESC(preliminary_hw_support, |
"Enable preliminary hardware support. " |
"Enable Haswell and ValleyView Support. " |
267,7 → 254,6 |
.has_bsd_ring = 1, |
.has_blt_ring = 1, |
.is_valleyview = 1, |
.display_mmio_offset = VLV_DISPLAY_BASE, |
}; |
static const struct intel_device_info intel_valleyview_d_info = { |
277,7 → 263,6 |
.has_bsd_ring = 1, |
.has_blt_ring = 1, |
.is_valleyview = 1, |
.display_mmio_offset = VLV_DISPLAY_BASE, |
}; |
static const struct intel_device_info intel_haswell_d_info = { |
365,15 → 350,15 |
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ |
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ |
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ |
INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ |
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ |
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ |
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ |
INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ |
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ |
INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */ |
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ |
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ |
INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ |
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ |
INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ |
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ |
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ |
INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */ |
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), |
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), |
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), |
469,7 → 454,7 |
if( unlikely(ent == NULL) ) |
{ |
dbgprintf("device not found\n"); |
return -ENODEV; |
return 0; |
}; |
struct intel_device_info *intel_info = |
745,6 → 730,8 |
if (dev_priv->forcewake_count == 0) \ |
dev_priv->gt.force_wake_put(dev_priv); \ |
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ |
} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ |
val = read##y(dev_priv->regs + reg + 0x180000); \ |
} else { \ |
val = read##y(dev_priv->regs + reg); \ |
} \ |
770,7 → 757,11 |
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \ |
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \ |
} \ |
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \ |
write##y(val, dev_priv->regs + reg + 0x180000); \ |
} else { \ |
write##y(val, dev_priv->regs + reg); \ |
} \ |
if (unlikely(__fifo_ret)) { \ |
gen6_gt_check_fifodbg(dev_priv); \ |
} \ |
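Both macro hunks encode the same rule: Valleyview keeps its display registers 0x180000 (VLV_DISPLAY_BASE) above the GT register block. A standalone sketch of the rebasing, assuming the IS_DISPLAYREG() classifier used above: |
static u32 vlv_rebase_sketch(struct drm_i915_private *dev_priv, u32 reg) |
{ |
	if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) |
		reg += 0x180000;	/* VLV_DISPLAY_BASE */ |
	return reg; |
} |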
/drivers/video/drm/i915/intel_fb.c |
---|
91,10 → 91,9 |
// .fb_debug_leave = drm_fb_helper_debug_leave, |
}; |
static int intelfb_create(struct drm_fb_helper *helper, |
static int intelfb_create(struct intel_fbdev *ifbdev, |
struct drm_fb_helper_surface_size *sizes) |
{ |
struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; |
struct drm_device *dev = ifbdev->helper.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct fb_info *info; |
187,7 → 186,8 |
goto out_unpin; |
} |
info->apertures->ranges[0].base = dev->mode_config.fb_base; |
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end; |
info->apertures->ranges[0].size = |
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; |
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset; |
info->fix.smem_len = size; |
223,10 → 223,26 |
return ret; |
} |
static int intel_fb_find_or_create_single(struct drm_fb_helper *helper, |
struct drm_fb_helper_surface_size *sizes) |
{ |
struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper; |
int new_fb = 0; |
int ret; |
if (!helper->fb) { |
ret = intelfb_create(ifbdev, sizes); |
if (ret) |
return ret; |
new_fb = 1; |
} |
return new_fb; |
} |
static struct drm_fb_helper_funcs intel_fb_helper_funcs = { |
.gamma_set = intel_crtc_fb_gamma_set, |
.gamma_get = intel_crtc_fb_gamma_get, |
.fb_probe = intelfb_create, |
.fb_probe = intel_fb_find_or_create_single, |
}; |
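For context, both entry points that end up in the fb_probe hook appear later in this file; a minimal usage sketch of the helper lifecycle, using the calls as they appear below: |
/* at init: triggers .fb_probe for the first framebuffer */ |
drm_fb_helper_initial_config(&ifbdev->helper, 32); |
/* on hotplug: re-runs .fb_probe, which then returns 0 (fb reused) */ |
drm_fb_helper_hotplug_event(&ifbdev->helper); |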
252,20 → 268,9 |
} |
drm_fb_helper_single_add_all_connectors(&ifbdev->helper); |
drm_fb_helper_initial_config(&ifbdev->helper, 32); |
return 0; |
} |
void intel_fbdev_initial_config(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
/* Due to peculiar init order wrt to hpd handling this is separate. */ |
drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32); |
} |
void intel_fb_output_poll_changed(struct drm_device *dev) |
{ |
drm_i915_private_t *dev_priv = dev->dev_private; |
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); |
} |
/drivers/video/drm/i915/Gtt/intel-gtt.c |
---|
15,8 → 15,6 |
* /fairy-tale-mode off |
*/ |
#include <syscall.h> |
#include <linux/module.h> |
#include <errno-base.h> |
#include <linux/pci.h> |
32,6 → 30,7 |
#include "intel-agp.h" |
#include <drm/intel-gtt.h> |
#include <syscall.h> |
struct pci_dev * |
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from); |
87,6 → 86,7 |
}; |
static struct _intel_private { |
struct intel_gtt base; |
const struct intel_gtt_driver *driver; |
struct pci_dev *pcidev; /* device one */ |
struct pci_dev *bridge_dev; |
101,18 → 101,7 |
struct resource ifp_resource; |
int resource_valid; |
struct page *scratch_page; |
phys_addr_t scratch_page_dma; |
int refcount; |
/* Whether i915 needs to use the dmar apis or not. */ |
unsigned int needs_dmar : 1; |
phys_addr_t gma_bus_addr; |
/* Size of memory reserved for graphics by the BIOS */ |
unsigned int stolen_size; |
/* Total number of gtt entries. */ |
unsigned int gtt_total_entries; |
/* Part of the gtt that is mappable by the cpu, for those chips where |
* this is not the full gtt. */ |
unsigned int gtt_mappable_entries; |
} intel_private; |
#define INTEL_GTT_GEN intel_private.driver->gen |
129,7 → 118,7 |
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
if (page == NULL) |
return -ENOMEM; |
intel_private.scratch_page_dma = page_to_phys(page); |
intel_private.base.scratch_page_dma = page_to_phys(page); |
intel_private.scratch_page = page; |
311,7 → 300,7 |
/* On previous hardware, the GTT size was just what was |
* required to map the aperture. |
*/ |
return intel_private.gtt_mappable_entries; |
return intel_private.base.gtt_mappable_entries; |
} |
} |
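A worked instance of that rule, assuming a 256 MiB aperture on such hardware: |
/* entries == aperture pages: |
 *   256 MiB / 4 KiB = 65536 PTEs, i.e. 256 KiB of GTT |
 */ |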
373,8 → 362,8 |
if (ret != 0) |
return ret; |
intel_private.gtt_mappable_entries = intel_gtt_mappable_entries(); |
intel_private.gtt_total_entries = intel_gtt_total_entries(); |
intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); |
intel_private.base.gtt_total_entries = intel_gtt_total_entries(); |
/* save the PGETBL reg for resume */ |
intel_private.PGETBL_save = |
386,10 → 375,10 |
dev_info(&intel_private.bridge_dev->dev, |
"detected gtt size: %dK total, %dK mappable\n", |
intel_private.gtt_total_entries * 4, |
intel_private.gtt_mappable_entries * 4); |
intel_private.base.gtt_total_entries * 4, |
intel_private.base.gtt_mappable_entries * 4); |
gtt_map_size = intel_private.gtt_total_entries * 4; |
gtt_map_size = intel_private.base.gtt_total_entries * 4; |
intel_private.gtt = NULL; |
if (intel_private.gtt == NULL) |
400,12 → 389,13 |
iounmap(intel_private.registers); |
return -ENOMEM; |
} |
intel_private.base.gtt = intel_private.gtt; |
asm volatile("wbinvd"); |
intel_private.stolen_size = intel_gtt_stolen_size(); |
intel_private.base.stolen_size = intel_gtt_stolen_size(); |
intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; |
intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2; |
ret = intel_gtt_setup_scratch_page(); |
if (ret != 0) { |
420,9 → 410,8 |
pci_read_config_dword(intel_private.pcidev, I915_GMADDR, |
&gma_addr); |
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); |
intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); |
return 0; |
} |
539,7 → 528,7 |
unsigned int i; |
for (i = first_entry; i < (first_entry + num_entries); i++) { |
intel_private.driver->write_entry(intel_private.scratch_page_dma, |
intel_private.driver->write_entry(intel_private.base.scratch_page_dma, |
i, 0); |
} |
readl(intel_private.gtt+i-1); |
605,6 → 594,25 |
writel(addr | pte_flags, intel_private.gtt + entry); |
} |
/* Certain Gen5 chipsets require idling the GPU before |
 * unmapping anything from the GTT when VT-d is enabled. |
 */ |
static inline int needs_idle_maps(void) |
{ |
#ifdef CONFIG_INTEL_IOMMU |
const unsigned short gpu_devid = intel_private.pcidev->device; |
/* Query intel_iommu to see if we need the workaround. Presumably that |
* was loaded first. |
*/ |
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || |
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && |
intel_iommu_gfx_mapped) |
return 1; |
#endif |
return 0; |
} |
static int i9xx_setup(void) |
{ |
u32 reg_addr, gtt_addr; |
632,6 → 640,9 |
break; |
} |
if (needs_idle_maps()) |
intel_private.base.do_idle_maps = 1; |
intel_i9xx_setup_flush(); |
return 0; |
783,18 → 794,8 |
struct agp_bridge_data *bridge) |
{ |
int i, mask; |
intel_private.driver = NULL; |
/* |
* Can be called from the fake agp driver but also directly from |
* drm/i915.ko. Hence we need to check whether everything is set up |
* already. |
*/ |
if (intel_private.driver) { |
intel_private.refcount++; |
return 1; |
} |
for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { |
if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { |
intel_private.driver = |
806,8 → 807,6 |
if (!intel_private.driver) |
return 0; |
intel_private.refcount++; |
if (bridge) { |
bridge->dev_private_data = &intel_private; |
bridge->dev = bridge_pdev; |
835,13 → 834,9 |
} |
EXPORT_SYMBOL(intel_gmch_probe); |
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size, |
phys_addr_t *mappable_base, unsigned long *mappable_end) |
struct intel_gtt *intel_gtt_get(void) |
{ |
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT; |
*stolen_size = intel_private.stolen_size; |
*mappable_base = intel_private.gma_bus_addr; |
*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT; |
return &intel_private.base; |
} |
EXPORT_SYMBOL(intel_gtt_get); |
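With the older interface restored above, callers take the whole intel_gtt block instead of receiving individual values; a usage sketch matching the field names used elsewhere in this patch: |
struct intel_gtt *gtt = intel_gtt_get(); |
size_t total = gtt->gtt_total_entries << PAGE_SHIFT; |
size_t stolen = gtt->stolen_size; |
phys_addr_t base = gtt->gma_bus_addr; |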
/drivers/video/drm/i915/i915_drv.h |
---|
30,8 → 30,6 |
#ifndef _I915_DRV_H_ |
#define _I915_DRV_H_ |
#include <uapi/drm/i915_drm.h> |
#include "i915_reg.h" |
#include "intel_bios.h" |
#include "intel_ringbuffer.h" |
98,12 → 96,7 |
}; |
#define port_name(p) ((p) + 'A') |
#define I915_GEM_GPU_DOMAINS \ |
(I915_GEM_DOMAIN_RENDER | \ |
I915_GEM_DOMAIN_SAMPLER | \ |
I915_GEM_DOMAIN_COMMAND | \ |
I915_GEM_DOMAIN_INSTRUCTION | \ |
I915_GEM_DOMAIN_VERTEX) |
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) |
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++) |
121,19 → 114,6 |
}; |
#define I915_NUM_PLLS 2 |
/* Used by dp and fdi links */ |
struct intel_link_m_n { |
uint32_t tu; |
uint32_t gmch_m; |
uint32_t gmch_n; |
uint32_t link_m; |
uint32_t link_n; |
}; |
void intel_link_compute_m_n(int bpp, int nlanes, |
int pixel_clock, int link_clock, |
struct intel_link_m_n *m_n); |
struct intel_ddi_plls { |
int spll_refcount; |
int wrpll1_refcount; |
163,13 → 143,8 |
#define I915_GEM_PHYS_OVERLAY_REGS 3 |
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) |
struct drm_i915_gem_phys_object { |
int id; |
struct page **page_list; |
drm_dma_handle_t *handle; |
struct drm_i915_gem_object *cur_obj; |
}; |
struct opregion_header; |
struct opregion_acpi; |
struct opregion_swsci; |
312,7 → 287,6 |
struct drm_i915_gem_object *obj); |
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
int x, int y); |
void (*hpd_irq_setup)(struct drm_device *dev); |
/* clock updates for mode set */ |
/* cursor updates */ |
/* render clock increase/decrease */ |
352,7 → 326,6 |
DEV_INFO_FLAG(has_llc) |
struct intel_device_info { |
u32 display_mmio_offset; |
u8 gen; |
u8 is_mobile:1; |
u8 is_i85x:1; |
380,50 → 353,6 |
u8 has_llc:1; |
}; |
enum i915_cache_level { |
I915_CACHE_NONE = 0, |
I915_CACHE_LLC, |
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ |
}; |
/* The Graphics Translation Table is the way in which GEN hardware translates a |
* Graphics Virtual Address into a Physical Address. In addition to the normal |
* collateral associated with any va->pa translations, GEN hardware also has a |
* portion of the GTT which can be mapped by the CPU and remain both coherent |
* and correct (in cases like swizzling). That region is referred to as GMADR in |
* the spec. |
*/ |
struct i915_gtt { |
unsigned long start; /* Start offset of used GTT */ |
size_t total; /* Total size GTT can map */ |
size_t stolen_size; /* Total size of stolen memory */ |
unsigned long mappable_end; /* End offset that we can CPU map */ |
struct io_mapping *mappable; /* Mapping to our CPU mappable region */ |
phys_addr_t mappable_base; /* PA of our GMADR */ |
/** "Graphics Stolen Memory" holds the global PTEs */ |
void __iomem *gsm; |
bool do_idle_maps; |
dma_addr_t scratch_page_dma; |
struct page *scratch_page; |
/* global gtt ops */ |
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, |
size_t *stolen, phys_addr_t *mappable_base, |
unsigned long *mappable_end); |
void (*gtt_remove)(struct drm_device *dev); |
void (*gtt_clear_range)(struct drm_device *dev, |
unsigned int first_entry, |
unsigned int num_entries); |
void (*gtt_insert_entries)(struct drm_device *dev, |
struct sg_table *st, |
unsigned int pg_start, |
enum i915_cache_level cache_level); |
}; |
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT) |
#define I915_PPGTT_PD_ENTRIES 512 |
#define I915_PPGTT_PT_ENTRIES 1024 |
struct i915_hw_ppgtt { |
433,16 → 362,6 |
uint32_t pd_offset; |
dma_addr_t *pt_dma_addr; |
dma_addr_t scratch_page_dma_addr; |
/* pte functions, mirroring the interface of the global gtt. */ |
void (*clear_range)(struct i915_hw_ppgtt *ppgtt, |
unsigned int first_entry, |
unsigned int num_entries); |
void (*insert_entries)(struct i915_hw_ppgtt *ppgtt, |
struct sg_table *st, |
unsigned int pg_start, |
enum i915_cache_level cache_level); |
void (*cleanup)(struct i915_hw_ppgtt *ppgtt); |
}; |
669,9 → 588,6 |
struct mutex hw_lock; |
}; |
/* defined intel_pm.c */ |
extern spinlock_t mchdev_lock; |
struct intel_ilk_power_mgmt { |
u8 cur_delay; |
u8 min_delay; |
712,158 → 628,6 |
struct work_struct error_work; |
}; |
struct i915_gem_mm { |
/** Memory allocator for GTT stolen memory */ |
struct drm_mm stolen; |
/** Memory allocator for GTT */ |
struct drm_mm gtt_space; |
/** List of all objects in gtt_space. Used to restore gtt |
* mappings on resume */ |
struct list_head bound_list; |
/** |
* List of objects which are not bound to the GTT (thus |
* are idle and not used by the GPU) but still have |
* (presumably uncached) pages still attached. |
*/ |
struct list_head unbound_list; |
/** Usable portion of the GTT for GEM */ |
unsigned long stolen_base; /* limited to low memory (32-bit) */ |
int gtt_mtrr; |
/** PPGTT used for aliasing the PPGTT with the GTT */ |
struct i915_hw_ppgtt *aliasing_ppgtt; |
bool shrinker_no_lock_stealing; |
/** |
* List of objects currently involved in rendering. |
* |
* Includes buffers having the contents of their GPU caches |
* flushed, not necessarily primitives. last_rendering_seqno |
* represents when the rendering involved will be completed. |
* |
* A reference is held on the buffer while on this list. |
*/ |
struct list_head active_list; |
/** |
* LRU list of objects which are not in the ringbuffer and |
* are ready to unbind, but are still in the GTT. |
* |
* last_rendering_seqno is 0 while an object is in this list. |
* |
* A reference is not held on the buffer while on this list, |
* as merely being GTT-bound shouldn't prevent its being |
* freed, and we'll pull it off the list in the free path. |
*/ |
struct list_head inactive_list; |
/** LRU list of objects with fence regs on them. */ |
struct list_head fence_list; |
/** |
* We leave the user IRQ off as much as possible, |
* but this means that requests will finish and never |
* be retired once the system goes idle. Set a timer to |
* fire periodically while the ring is running. When it |
* fires, go retire requests. |
*/ |
struct delayed_work retire_work; |
/** |
* Are we in a non-interruptible section of code like |
* modesetting? |
*/ |
bool interruptible; |
/** |
* Flag if the X Server, and thus DRM, is not currently in |
* control of the device. |
* |
* This is set between LeaveVT and EnterVT. It needs to be |
* replaced with a semaphore. It also needs to be |
* transitioned away from for kernel modesetting. |
*/ |
int suspended; |
/** Bit 6 swizzling required for X tiling */ |
uint32_t bit_6_swizzle_x; |
/** Bit 6 swizzling required for Y tiling */ |
uint32_t bit_6_swizzle_y; |
/* storage for physical objects */ |
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
/* accounting, useful for userland debugging */ |
size_t object_memory; |
u32 object_count; |
}; |
struct i915_gpu_error { |
/* For hangcheck timer */ |
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) |
struct timer_list hangcheck_timer; |
int hangcheck_count; |
uint32_t last_acthd[I915_NUM_RINGS]; |
uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; |
/* For reset and error_state handling. */ |
spinlock_t lock; |
/* Protected by the above dev->gpu_error.lock. */ |
struct drm_i915_error_state *first_error; |
struct work_struct work; |
unsigned long last_reset; |
/** |
* State variable and reset counter controlling the reset flow |
* |
* Upper bits are for the reset counter. This counter is used by the |
* wait_seqno code to notice, without taking any locks, that a reset |
* event happened and that it needs to restart the entire ioctl (since |
* most likely the seqno it waited for won't ever signal anytime soon). |
* |
* This is important for lock-free wait paths, where no contended lock |
* naturally enforces the correct ordering between the bail-out of the |
* waiter and the gpu reset work code. |
* |
* Lowest bit controls the reset state machine: Set means a reset is in |
* progress. This state will (presuming we don't have any bugs) decay |
* into either unset (successful reset) or the special WEDGED value (hw |
* terminally sour). All waiters on the reset_queue will be woken when |
* that happens. |
*/ |
atomic_t reset_counter; |
/** |
* Special values/flags for reset_counter |
* |
* Note that the code relies on |
* I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG |
* being true. |
*/ |
#define I915_RESET_IN_PROGRESS_FLAG 1 |
#define I915_WEDGED 0xffffffff |
/** |
* Waitqueue to signal when the reset has completed. Used by clients |
* that wait for dev_priv->mm.wedged to settle. |
*/ |
wait_queue_head_t reset_queue; |
/* For gpu hang simulation. */ |
unsigned int stop_rings; |
}; |
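The reset_counter encoding above is what lets waiters stay lock-free; a minimal check built only from the flags defined in this struct: |
static inline bool reset_pending_sketch(struct i915_gpu_error *error) |
{ |
	unsigned int v = atomic_read(&error->reset_counter); |
	/* I915_WEDGED is all ones, so it also carries the in-progress bit */ |
	return (v & I915_RESET_IN_PROGRESS_FLAG) != 0; |
} |
A waiter samples the counter before sleeping and restarts the ioctl when this check fires or the sampled value has changed. |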
enum modeset_restore { |
MODESET_ON_LID_OPEN, |
MODESET_DONE, |
MODESET_SUSPENDED, |
}; |
typedef struct drm_i915_private { |
struct drm_device *dev; |
880,11 → 644,10 |
/** forcewake_count is protected by gt_lock */ |
unsigned forcewake_count; |
/** gt_lock is also taken in irq contexts. */ |
spinlock_t gt_lock; |
struct spinlock gt_lock; |
struct intel_gmbus gmbus[GMBUS_NUM_PORTS]; |
/** gmbus_mutex protects against concurrent usage of the single hw gmbus |
* controller on different i2c buses. */ |
struct mutex gmbus_mutex; |
894,11 → 657,9 |
*/ |
uint32_t gpio_mmio_base; |
wait_queue_head_t gmbus_wait_queue; |
struct pci_dev *bridge_dev; |
struct intel_ring_buffer ring[I915_NUM_RINGS]; |
uint32_t last_seqno, next_seqno; |
uint32_t next_seqno; |
drm_dma_handle_t *status_page_dmah; |
struct resource mch_res; |
908,24 → 669,31 |
/* protects the irq masks */ |
spinlock_t irq_lock; |
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ |
// struct pm_qos_request pm_qos; |
/* DPIO indirect register protection */ |
struct mutex dpio_lock; |
spinlock_t dpio_lock; |
/** Cached value of IMR to avoid reads in updating the bitfield */ |
u32 pipestat[2]; |
u32 irq_mask; |
u32 gt_irq_mask; |
u32 pch_irq_mask; |
u32 hotplug_supported_mask; |
struct work_struct hotplug_work; |
bool enable_hotplug_processing; |
int num_pipe; |
int num_pch_pll; |
/* For hangcheck timer */ |
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ |
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) |
struct timer_list hangcheck_timer; |
int hangcheck_count; |
uint32_t last_acthd[I915_NUM_RINGS]; |
uint32_t prev_instdone[I915_NUM_INSTDONE_REG]; |
unsigned int stop_rings; |
unsigned long cfb_size; |
unsigned int cfb_fb; |
enum plane cfb_plane; |
936,7 → 704,7 |
/* overlay */ |
struct intel_overlay *overlay; |
unsigned int sprite_scaling_enabled; |
bool sprite_scaling_enabled; |
/* LVDS info */ |
int backlight_level; /* restore backlight to this value */ |
953,6 → 721,7 |
unsigned int display_clock_mode:1; |
int lvds_ssc_freq; |
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ |
unsigned int lvds_val; /* used for checking LVDS channel mode */ |
struct { |
int rate; |
int lanes; |
973,6 → 742,11 |
unsigned int fsb_freq, mem_freq, is_ddr3; |
spinlock_t error_lock; |
/* Protected by dev->error_lock. */ |
struct drm_i915_error_state *first_error; |
struct work_struct error_work; |
struct completion error_completion; |
struct workqueue_struct *wq; |
/* Display functions */ |
984,13 → 758,116 |
unsigned long quirks; |
enum modeset_restore modeset_restore; |
struct mutex modeset_restore_lock; |
/* Register state */ |
bool modeset_on_lid; |
struct i915_gtt gtt; |
struct { |
/** Bridge to intel-gtt-ko */ |
struct intel_gtt *gtt; |
/** Memory allocator for GTT stolen memory */ |
struct drm_mm stolen; |
/** Memory allocator for GTT */ |
struct drm_mm gtt_space; |
/** List of all objects in gtt_space. Used to restore gtt |
* mappings on resume */ |
struct list_head bound_list; |
/** |
* List of objects which are not bound to the GTT (thus |
* are idle and not used by the GPU) but still have |
* (presumably uncached) pages still attached. |
*/ |
struct list_head unbound_list; |
struct i915_gem_mm mm; |
/** Usable portion of the GTT for GEM */ |
unsigned long gtt_start; |
unsigned long gtt_mappable_end; |
unsigned long gtt_end; |
// struct io_mapping *gtt_mapping; |
phys_addr_t gtt_base_addr; |
int gtt_mtrr; |
/** PPGTT used for aliasing the PPGTT with the GTT */ |
struct i915_hw_ppgtt *aliasing_ppgtt; |
// struct shrinker inactive_shrinker; |
bool shrinker_no_lock_stealing; |
/** |
* List of objects currently involved in rendering. |
* |
* Includes buffers having the contents of their GPU caches |
* flushed, not necessarily primitives. last_rendering_seqno |
* represents when the rendering involved will be completed. |
* |
* A reference is held on the buffer while on this list. |
*/ |
struct list_head active_list; |
/** |
* LRU list of objects which are not in the ringbuffer and |
* are ready to unbind, but are still in the GTT. |
* |
* last_rendering_seqno is 0 while an object is in this list. |
* |
* A reference is not held on the buffer while on this list, |
* as merely being GTT-bound shouldn't prevent its being |
* freed, and we'll pull it off the list in the free path. |
*/ |
struct list_head inactive_list; |
/** LRU list of objects with fence regs on them. */ |
struct list_head fence_list; |
/** |
* We leave the user IRQ off as much as possible, |
* but this means that requests will finish and never |
* be retired once the system goes idle. Set a timer to |
* fire periodically while the ring is running. When it |
* fires, go retire requests. |
*/ |
struct delayed_work retire_work; |
/** |
* Are we in a non-interruptible section of code like |
* modesetting? |
*/ |
bool interruptible; |
/** |
* Flag if the X Server, and thus DRM, is not currently in |
* control of the device. |
* |
* This is set between LeaveVT and EnterVT. It needs to be |
* replaced with a semaphore. It also needs to be |
* transitioned away from for kernel modesetting. |
*/ |
int suspended; |
/** |
* Flag if the hardware appears to be wedged. |
* |
* This is set when attempts to idle the device time out. |
* It prevents command submission from occurring and makes |
* every pending request fail. |
*/ |
atomic_t wedged; |
/** Bit 6 swizzling required for X tiling */ |
uint32_t bit_6_swizzle_x; |
/** Bit 6 swizzling required for Y tiling */ |
uint32_t bit_6_swizzle_y; |
/* storage for physical objects */ |
// struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; |
/* accounting, useful for userland debugging */ |
size_t gtt_total; |
size_t mappable_gtt_total; |
size_t object_memory; |
u32 object_count; |
} mm; |
/* Kernel Modesetting */ |
struct sdvo_device_mapping sdvo_mappings[2]; |
1031,7 → 908,7 |
struct drm_mm_node *compressed_fb; |
struct drm_mm_node *compressed_llb; |
struct i915_gpu_error gpu_error; |
unsigned long last_gpu_reset; |
/* list of fbdev register on this device */ |
struct intel_fbdev *fbdev; |
1050,7 → 927,7 |
bool hw_contexts_disabled; |
uint32_t hw_context_size; |
u32 fdi_rx_config; |
bool fdi_rx_polarity_reversed; |
struct i915_suspend_saved_registers regfile; |
1071,7 → 948,11 |
HDMI_AUDIO_ON, /* force turn on HDMI audio */ |
}; |
#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1) |
enum i915_cache_level { |
I915_CACHE_NONE = 0, |
I915_CACHE_LLC, |
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */ |
}; |
struct drm_i915_gem_object_ops { |
/* Interface between the GEM object and its backing storage. |
1096,10 → 977,10 |
const struct drm_i915_gem_object_ops *ops; |
// void *mapped; |
/** Current space allocated to this object in the GTT, if any. */ |
struct drm_mm_node *gtt_space; |
/** Stolen memory for this object, instead of being backed by shmem. */ |
struct drm_mm_node *stolen; |
struct list_head gtt_list; |
/** This object's place on the active/inactive lists */ |
1184,6 → 1065,7 |
unsigned int has_global_gtt_mapping:1; |
unsigned int has_dma_mapping:1; |
// dma_addr_t *allocated_pages; |
struct sg_table *pages; |
int pages_pin_count; |
1225,6 → 1107,13 |
/** for phy allocated objects */ |
struct drm_i915_gem_phys_object *phys_obj; |
/** |
* Number of crtcs where this object is currently the fb, but |
* will be page flipped away on the next vblank. When it |
* reaches 0, dev_priv->pending_flip_queue will be woken up. |
*/ |
atomic_t pending_flip; |
}; |
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) |
1263,7 → 1152,7 |
struct drm_i915_file_private { |
struct { |
spinlock_t lock; |
struct spinlock lock; |
struct list_head request_list; |
} mm; |
struct idr context_idr; |
1349,8 → 1238,6 |
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5) |
#define HAS_DDI(dev) (IS_HASWELL(dev)) |
#define INTEL_PCH_DEVICE_ID_MASK 0xff00 |
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 |
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 |
1406,7 → 1293,6 |
extern bool i915_enable_hangcheck __read_mostly; |
extern int i915_enable_ppgtt __read_mostly; |
extern unsigned int i915_preliminary_hw_support __read_mostly; |
extern int i915_disable_power_well __read_mostly; |
extern int i915_master_create(struct drm_device *dev, struct drm_master *master); |
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); |
1443,7 → 1329,6 |
void i915_handle_error(struct drm_device *dev, bool wedged); |
extern void intel_irq_init(struct drm_device *dev); |
extern void intel_hpd_init(struct drm_device *dev); |
extern void intel_gt_init(struct drm_device *dev); |
extern void intel_gt_reset(struct drm_device *dev); |
1512,8 → 1397,6 |
int i915_gem_wait_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void i915_gem_load(struct drm_device *dev); |
void *i915_gem_object_alloc(struct drm_device *dev); |
void i915_gem_object_free(struct drm_i915_gem_object *obj); |
int i915_gem_init_object(struct drm_gem_object *obj); |
void i915_gem_object_init(struct drm_i915_gem_object *obj, |
const struct drm_i915_gem_object_ops *ops); |
1520,7 → 1403,6 |
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, |
size_t size); |
void i915_gem_free_object(struct drm_gem_object *obj); |
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj, |
uint32_t alignment, |
bool map_and_fenceable, |
1527,7 → 1409,6 |
bool nonblocking); |
void i915_gem_object_unpin(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj); |
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); |
void i915_gem_release_mmap(struct drm_i915_gem_object *obj); |
void i915_gem_lastclose(struct drm_device *dev); |
1579,8 → 1460,8 |
return (int32_t)(seq1 - seq2) >= 0; |
} |
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); |
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); |
extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); |
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); |
1606,19 → 1487,9 |
void i915_gem_retire_requests(struct drm_device *dev); |
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring); |
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error, |
int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv, |
bool interruptible); |
static inline bool i915_reset_in_progress(struct i915_gpu_error *error) |
{ |
return unlikely(atomic_read(&error->reset_counter) |
& I915_RESET_IN_PROGRESS_FLAG); |
} |
static inline bool i915_terminally_wedged(struct i915_gpu_error *error) |
{ |
return atomic_read(&error->reset_counter) == I915_WEDGED; |
} |
void i915_gem_reset(struct drm_device *dev); |
void i915_gem_clflush_object(struct drm_i915_gem_object *obj); |
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj, |
1658,10 → 1529,9 |
void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
uint32_t |
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode); |
uint32_t |
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size, |
int tiling_mode, bool fenced); |
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev, |
uint32_t size, |
int tiling_mode); |
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level); |
1682,6 → 1552,7 |
struct drm_file *file); |
/* i915_gem_gtt.c */ |
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev); |
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev); |
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj, |
1695,10 → 1566,12 |
enum i915_cache_level cache_level); |
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj); |
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj); |
void i915_gem_init_global_gtt(struct drm_device *dev); |
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start, |
unsigned long mappable_end, unsigned long end); |
void i915_gem_init_global_gtt(struct drm_device *dev, |
unsigned long start, |
unsigned long mappable_end, |
unsigned long end); |
int i915_gem_gtt_init(struct drm_device *dev); |
void i915_gem_gtt_fini(struct drm_device *dev); |
static inline void i915_gem_chipset_flush(struct drm_device *dev) |
{ |
if (INTEL_INFO(dev)->gen < 6) |
1716,22 → 1589,9 |
/* i915_gem_stolen.c */ |
int i915_gem_init_stolen(struct drm_device *dev); |
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size); |
void i915_gem_stolen_cleanup_compression(struct drm_device *dev); |
void i915_gem_cleanup_stolen(struct drm_device *dev); |
struct drm_i915_gem_object * |
i915_gem_object_create_stolen(struct drm_device *dev, u32 size); |
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj); |
/* i915_gem_tiling.c */ |
inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) |
{ |
drm_i915_private_t *dev_priv = obj->base.dev->dev_private; |
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && |
obj->tiling_mode != I915_TILING_NONE; |
} |
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); |
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj); |
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj); |
1757,9 → 1617,9 |
extern int i915_save_state(struct drm_device *dev); |
extern int i915_restore_state(struct drm_device *dev); |
/* i915_ums.c */ |
void i915_save_display_reg(struct drm_device *dev); |
void i915_restore_display_reg(struct drm_device *dev); |
/* i915_suspend.c */ |
extern int i915_save_state(struct drm_device *dev); |
extern int i915_restore_state(struct drm_device *dev); |
/* i915_sysfs.c */ |
void i915_setup_sysfs(struct drm_device *dev_priv); |
1816,7 → 1676,6 |
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); |
extern void intel_modeset_setup_hw_state(struct drm_device *dev, |
bool force_restore); |
extern void i915_redisable_vga(struct drm_device *dev); |
extern bool intel_fbc_enabled(struct drm_device *dev); |
extern void intel_disable_fbc(struct drm_device *dev); |
extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1889,21 → 1748,6 |
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) |
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) |
/* "Broadcast RGB" property */ |
#define INTEL_BROADCAST_RGB_AUTO 0 |
#define INTEL_BROADCAST_RGB_FULL 1 |
#define INTEL_BROADCAST_RGB_LIMITED 2 |
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev) |
{ |
if (HAS_PCH_SPLIT(dev)) |
return CPU_VGACNTRL; |
else if (IS_VALLEYVIEW(dev)) |
return VLV_VGACNTRL; |
else |
return VGACNTRL; |
} |
typedef struct |
{ |
int width; |
/drivers/video/drm/i915/i915_gem_context.c |
---|
128,8 → 128,13 |
static void do_destroy(struct i915_hw_context *ctx) |
{ |
struct drm_device *dev = ctx->obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (ctx->file_priv) |
idr_remove(&ctx->file_priv->context_idr, ctx->id); |
else |
BUG_ON(ctx != dev_priv->ring[RCS].default_context); |
drm_gem_object_unreference(&ctx->obj->base); |
kfree(ctx); |
141,7 → 146,7 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct i915_hw_context *ctx; |
int ret; |
int ret, id; |
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); |
if (ctx == NULL) |
166,12 → 171,23 |
ctx->file_priv = file_priv; |
ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0, |
GFP_KERNEL); |
if (ret < 0) |
again: |
if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) { |
ret = -ENOMEM; |
DRM_DEBUG_DRIVER("idr allocation failed\n"); |
goto err_out; |
ctx->id = ret; |
} |
ret = idr_get_new_above(&file_priv->context_idr, ctx, |
DEFAULT_CONTEXT_ID + 1, &id); |
if (ret == 0) |
ctx->id = id; |
if (ret == -EAGAIN) |
goto again; |
else if (ret) |
goto err_out; |
return ctx; |
err_out: |
229,7 → 245,11 |
void i915_gem_context_init(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t ctx_size; |
dev_priv->hw_contexts_disabled = true; |
return; |
#if 0 |
if (!HAS_HW_CONTEXTS(dev)) { |
dev_priv->hw_contexts_disabled = true; |
241,9 → 261,11 |
dev_priv->ring[RCS].default_context) |
return; |
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); |
ctx_size = get_context_size(dev); |
dev_priv->hw_context_size = get_context_size(dev); |
dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096); |
if (dev_priv->hw_context_size > (1<<20)) { |
if (ctx_size <= 0 || ctx_size > (1<<20)) { |
dev_priv->hw_contexts_disabled = true; |
return; |
} |
/drivers/video/drm/i915/i915_gem_gtt.c |
---|
24,12 → 24,6 |
#define iowrite32(v, addr) writel((v), (addr)) |
#define AGP_NORMAL_MEMORY 0 |
#define AGP_USER_TYPES (1 << 16) |
#define AGP_USER_MEMORY (AGP_USER_TYPES) |
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
#include <drm/drmP.h> |
#include <drm/i915_drm.h> |
#include "i915_drv.h" |
36,6 → 30,10 |
#include "i915_trace.h" |
#include "intel_drv.h" |
#define AGP_USER_TYPES (1 << 16) |
#define AGP_USER_MEMORY (AGP_USER_TYPES) |
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) |
typedef uint32_t gtt_pte_t; |
/* PPGTT stuff */ |
52,7 → 50,7 |
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1) |
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr) |
static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev, |
static inline gtt_pte_t pte_encode(struct drm_device *dev, |
dma_addr_t addr, |
enum i915_cache_level level) |
{ |
85,7 → 83,7 |
} |
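For reference, the renamed pte_encode() packs a physical page address with validity and cacheability bits; a simplified sketch, assuming the GEN6_PTE_VALID and GEN6_PTE_CACHE_LLC defines from the elided lines of this block: |
static inline gtt_pte_t pte_encode_llc_sketch(dma_addr_t addr) |
{ |
	/* bits 31:12 carry the page address, the low bits the cache mode */ |
	return GEN6_PTE_ADDR_ENCODE(addr) | GEN6_PTE_CACHE_LLC | GEN6_PTE_VALID; |
} |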
/* PPGTT support for Sandybridge/Gen6 and later */ |
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, |
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt, |
unsigned first_entry, |
unsigned num_entries) |
{ |
95,16 → 93,15 |
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
unsigned last_pte, i; |
scratch_pte = gen6_pte_encode(ppgtt->dev, |
ppgtt->scratch_page_dma_addr, |
scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr, |
I915_CACHE_LLC); |
pt_vaddr = AllocKernelSpace(4096); |
if(pt_vaddr == NULL) |
return; |
while (num_entries) { |
if(pt_vaddr != NULL) |
{ |
while (num_entries) |
{ |
last_pte = first_pte + num_entries; |
if (last_pte > I915_PPGTT_PT_ENTRIES) |
last_pte = I915_PPGTT_PT_ENTRIES; |
117,81 → 114,15 |
num_entries -= last_pte - first_pte; |
first_pte = 0; |
act_pd++; |
}; |
FreeKernelSpace(pt_vaddr); |
} |
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt, |
struct sg_table *pages, |
unsigned first_entry, |
enum i915_cache_level cache_level) |
{ |
gtt_pte_t *pt_vaddr; |
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; |
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
unsigned i, j, m, segment_len; |
dma_addr_t page_addr; |
struct scatterlist *sg; |
/* init sg walking */ |
sg = pages->sgl; |
i = 0; |
segment_len = sg_dma_len(sg) >> PAGE_SHIFT; |
m = 0; |
pt_vaddr = AllocKernelSpace(4096); |
if(pt_vaddr == NULL) |
return; |
while (i < pages->nents) { |
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pd]), 3); |
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { |
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr, |
cache_level); |
/* grab the next page */ |
if (++m == segment_len) { |
if (++i == pages->nents) |
break; |
sg = sg_next(sg); |
segment_len = sg_dma_len(sg) >> PAGE_SHIFT; |
m = 0; |
} |
} |
first_pte = 0; |
act_pd++; |
} |
FreeKernelSpace(pt_vaddr); |
}; |
} |
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt) |
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) |
{ |
int i; |
if (ppgtt->pt_dma_addr) { |
for (i = 0; i < ppgtt->num_pd_entries; i++) |
pci_unmap_page(ppgtt->dev->pdev, |
ppgtt->pt_dma_addr[i], |
4096, PCI_DMA_BIDIRECTIONAL); |
} |
kfree(ppgtt->pt_dma_addr); |
for (i = 0; i < ppgtt->num_pd_entries; i++) |
__free_page(ppgtt->pt_pages[i]); |
kfree(ppgtt->pt_pages); |
kfree(ppgtt); |
} |
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
{ |
struct drm_device *dev = ppgtt->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct i915_hw_ppgtt *ppgtt; |
unsigned first_pd_entry_in_global_pt; |
int i; |
int ret = -ENOMEM; |
199,17 → 130,17 |
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 |
* entries. For aliasing ppgtt support we just steal them at the end for |
* now. */ |
first_pd_entry_in_global_pt = |
gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES; |
first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; |
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); |
if (!ppgtt) |
return ret; |
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES; |
ppgtt->clear_range = gen6_ppgtt_clear_range; |
ppgtt->insert_entries = gen6_ppgtt_insert_entries; |
ppgtt->cleanup = gen6_ppgtt_cleanup; |
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries, |
GFP_KERNEL); |
if (!ppgtt->pt_pages) |
return -ENOMEM; |
goto err_ppgtt; |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL); |
217,7 → 148,10 |
goto err_pt_alloc; |
} |
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries, |
/* |
if (dev_priv->mm.gtt->needs_dmar) { |
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) |
*ppgtt->num_pd_entries, |
GFP_KERNEL); |
if (!ppgtt->pt_dma_addr) |
goto err_pt_alloc; |
225,75 → 159,126 |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
dma_addr_t pt_addr; |
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096, |
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], |
0, 4096, |
PCI_DMA_BIDIRECTIONAL); |
if (pci_dma_mapping_error(dev->pdev, |
pt_addr)) { |
ret = -EIO; |
goto err_pd_pin; |
} |
ppgtt->pt_dma_addr[i] = pt_addr; |
} |
} |
*/ |
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma; |
ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma; |
ppgtt->clear_range(ppgtt, 0, |
i915_ppgtt_clear_range(ppgtt, 0, |
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES); |
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t); |
dev_priv->mm.aliasing_ppgtt = ppgtt; |
return 0; |
err_pd_pin: |
if (ppgtt->pt_dma_addr) { |
for (i--; i >= 0; i--) |
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], |
4096, PCI_DMA_BIDIRECTIONAL); |
} |
// if (ppgtt->pt_dma_addr) { |
// for (i--; i >= 0; i--) |
// pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], |
// 4096, PCI_DMA_BIDIRECTIONAL); |
// } |
err_pt_alloc: |
kfree(ppgtt->pt_dma_addr); |
// kfree(ppgtt->pt_dma_addr); |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
if (ppgtt->pt_pages[i]) |
__free_page(ppgtt->pt_pages[i]); |
FreePage((addr_t)(ppgtt->pt_pages[i])); |
} |
kfree(ppgtt->pt_pages); |
err_ppgtt: |
kfree(ppgtt); |
return ret; |
} |
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) |
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct i915_hw_ppgtt *ppgtt; |
int ret; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
int i; |
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); |
if (!ppgtt) |
return -ENOMEM; |
return; |
ppgtt->dev = dev; |
// if (ppgtt->pt_dma_addr) { |
// for (i = 0; i < ppgtt->num_pd_entries; i++) |
// pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i], |
// 4096, PCI_DMA_BIDIRECTIONAL); |
// } |
ret = gen6_ppgtt_init(ppgtt); |
if (ret) |
// kfree(ppgtt->pt_dma_addr); |
for (i = 0; i < ppgtt->num_pd_entries; i++) |
FreePage((addr_t)(ppgtt->pt_pages[i])); |
kfree(ppgtt->pt_pages); |
kfree(ppgtt); |
else |
dev_priv->mm.aliasing_ppgtt = ppgtt; |
return ret; |
} |
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev) |
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt, |
const struct sg_table *pages, |
unsigned first_entry, |
enum i915_cache_level cache_level) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
gtt_pte_t *pt_vaddr; |
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES; |
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES; |
unsigned i, j, m, segment_len; |
dma_addr_t page_addr; |
struct scatterlist *sg; |
if (!ppgtt) |
/* init sg walking */ |
sg = pages->sgl; |
i = 0; |
segment_len = sg_dma_len(sg) >> PAGE_SHIFT; |
m = 0; |
pt_vaddr = AllocKernelSpace(4096); |
if( pt_vaddr == NULL) |
return; |
ppgtt->cleanup(ppgtt); |
while (i < pages->nents) { |
MapPage(pt_vaddr,(addr_t)ppgtt->pt_pages[act_pd], 3); |
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) { |
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr, |
cache_level); |
/* grab the next page */ |
if (++m == segment_len) { |
if (++i == pages->nents) |
break; |
sg = sg_next(sg); |
segment_len = sg_dma_len(sg) >> PAGE_SHIFT; |
m = 0; |
} |
} |
first_pte = 0; |
act_pd++; |
} |
FreeKernelSpace(pt_vaddr); |
} |
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level) |
{ |
ppgtt->insert_entries(ppgtt, obj->pages, |
i915_ppgtt_insert_sg_entries(ppgtt, |
obj->pages, |
obj->gtt_space->start >> PAGE_SHIFT, |
cache_level); |
} |
301,7 → 286,7 |
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt, |
struct drm_i915_gem_object *obj) |
{ |
ppgtt->clear_range(ppgtt, |
i915_ppgtt_clear_range(ppgtt, |
obj->gtt_space->start >> PAGE_SHIFT, |
obj->base.size >> PAGE_SHIFT); |
} |
312,7 → 297,7 |
uint32_t pd_offset; |
struct intel_ring_buffer *ring; |
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; |
gtt_pte_t __iomem *pd_addr; |
uint32_t __iomem *pd_addr; |
uint32_t pd_entry; |
int i; |
320,11 → 305,15 |
return; |
pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t); |
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t); |
for (i = 0; i < ppgtt->num_pd_entries; i++) { |
dma_addr_t pt_addr; |
if (dev_priv->mm.gtt->needs_dmar) |
pt_addr = ppgtt->pt_dma_addr[i]; |
else |
pt_addr = page_to_phys(ppgtt->pt_pages[i]); |
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr); |
pd_entry |= GEN6_PDE_VALID; |
364,27 → 353,11 |
} |
} |
extern int intel_iommu_gfx_mapped; |
/* Certain Gen5 chipsets require idling the GPU before |
 * unmapping anything from the GTT when VT-d is enabled. |
 */ |
static inline bool needs_idle_maps(struct drm_device *dev) |
{ |
#ifdef CONFIG_INTEL_IOMMU |
/* Query intel_iommu to see if we need the workaround. Presumably that |
* was loaded first. |
*/ |
if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped) |
return true; |
#endif |
return false; |
} |
static bool do_idling(struct drm_i915_private *dev_priv) |
{ |
bool ret = dev_priv->mm.interruptible; |
if (unlikely(dev_priv->gtt.do_idle_maps)) { |
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { |
dev_priv->mm.interruptible = false; |
if (i915_gpu_idle(dev_priv->dev)) { |
DRM_ERROR("Couldn't idle GPU\n"); |
398,10 → 371,39 |
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) |
{ |
if (unlikely(dev_priv->gtt.do_idle_maps)) |
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) |
dev_priv->mm.interruptible = interruptible; |
} |
static void i915_ggtt_clear_range(struct drm_device *dev, |
unsigned first_entry, |
unsigned num_entries) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
gtt_pte_t scratch_pte; |
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry; |
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; |
int i; |
if (INTEL_INFO(dev)->gen < 6) { |
intel_gtt_clear_range(first_entry, num_entries); |
return; |
} |
if (WARN(num_entries > max_entries, |
"First entry = %d; Num entries = %d (max=%d)\n", |
first_entry, num_entries, max_entries)) |
num_entries = max_entries; |
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC); |
for (i = 0; i < num_entries; i++) |
iowrite32(scratch_pte, &gtt_base[i]); |
readl(gtt_base); |
} |
#if 0 |
void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
408,8 → 410,8 |
struct drm_i915_gem_object *obj; |
/* First fill our portion of the GTT with scratch pages */ |
dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE, |
dev_priv->gtt.total / PAGE_SIZE); |
i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE, |
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE); |
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
i915_gem_clflush_object(obj); |
418,17 → 420,30 |
i915_gem_chipset_flush(dev); |
} |
#endif |
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) |
{ |
struct scatterlist *sg, *s; |
unsigned int nents ; |
int i; |
if (obj->has_dma_mapping) |
return 0; |
if (!dma_map_sg(&obj->base.dev->pdev->dev, |
obj->pages->sgl, obj->pages->nents, |
PCI_DMA_BIDIRECTIONAL)) |
return -ENOSPC; |
sg = obj->pages->sgl; |
nents = obj->pages->nents; |
WARN_ON(nents == 0 || sg[0].length == 0); |
for_each_sg(sg, s, nents, i) { |
BUG_ON(!sg_page(s)); |
s->dma_address = sg_phys(s); |
} |
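/* Full barrier ("lock addl" is a portable stand-in for mfence): make |
 * the dma_address stores visible before the pages reach the GTT. |
 */ |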
asm volatile("lock; addl $0,0(%%esp)": : :"memory"); |
return 0; |
} |
438,15 → 453,16 |
* within the global GTT as well as accessible by the GPU through the GMADR |
* mapped BAR (dev_priv->mm.gtt->gtt). |
*/ |
static void gen6_ggtt_insert_entries(struct drm_device *dev, |
struct sg_table *st, |
unsigned int first_entry, |
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj, |
enum i915_cache_level level) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct sg_table *st = obj->pages; |
struct scatterlist *sg = st->sgl; |
gtt_pte_t __iomem *gtt_entries = |
(gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry; |
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT; |
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry; |
gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry; |
int unused, i = 0; |
unsigned int len, m = 0; |
dma_addr_t addr; |
455,12 → 471,14 |
len = sg_dma_len(sg) >> PAGE_SHIFT; |
for (m = 0; m < len; m++) { |
addr = sg_dma_address(sg) + (m << PAGE_SHIFT); |
iowrite32(gen6_pte_encode(dev, addr, level), |
&gtt_entries[i]); |
iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]); |
i++; |
} |
} |
BUG_ON(i > max_entries); |
BUG_ON(i != obj->base.size / PAGE_SIZE); |
/* XXX: This serves as a posting read to make sure that the PTE has |
* actually been updated. There is some concern that even though |
* registers and PTEs are within the same BAR that they are potentially |
468,8 → 486,7 |
* hardware should work, we must keep this posting read for paranoia. |
*/ |
if (i != 0) |
WARN_ON(readl(&gtt_entries[i-1]) |
!= gen6_pte_encode(dev, addr, level)); |
WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level)); |
/* This next bit makes the above posting read even more important. We |
* want to flush the TLBs only after we're certain all the PTE updates |
479,68 → 496,26 |
POSTING_READ(GFX_FLSH_CNTL_GEN6); |
} |
static void gen6_ggtt_clear_range(struct drm_device *dev, |
unsigned int first_entry, |
unsigned int num_entries) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
gtt_pte_t scratch_pte; |
gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry; |
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry; |
int i; |
if (WARN(num_entries > max_entries, |
"First entry = %d; Num entries = %d (max=%d)\n", |
first_entry, num_entries, max_entries)) |
num_entries = max_entries; |
scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma, |
I915_CACHE_LLC); |
for (i = 0; i < num_entries; i++) |
iowrite32(scratch_pte, &gtt_base[i]); |
readl(gtt_base); |
} |
static void i915_ggtt_insert_entries(struct drm_device *dev, |
struct sg_table *st, |
unsigned int pg_start, |
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level) |
{ |
struct drm_device *dev = obj->base.dev; |
if (INTEL_INFO(dev)->gen < 6) { |
unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; |
intel_gtt_insert_sg_entries(st, pg_start, flags); |
intel_gtt_insert_sg_entries(obj->pages, |
obj->gtt_space->start >> PAGE_SHIFT, |
flags); |
} else { |
gen6_ggtt_bind_object(obj, cache_level); |
} |
static void i915_ggtt_clear_range(struct drm_device *dev, |
unsigned int first_entry, |
unsigned int num_entries) |
{ |
intel_gtt_clear_range(first_entry, num_entries); |
} |
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj, |
enum i915_cache_level cache_level) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
dev_priv->gtt.gtt_insert_entries(dev, obj->pages, |
obj->gtt_space->start >> PAGE_SHIFT, |
cache_level); |
obj->has_global_gtt_mapping = 1; |
} |
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
{ |
struct drm_device *dev = obj->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
dev_priv->gtt.gtt_clear_range(obj->base.dev, |
i915_ggtt_clear_range(obj->base.dev, |
obj->gtt_space->start >> PAGE_SHIFT, |
obj->base.size >> PAGE_SHIFT); |
555,10 → 530,10 |
interruptible = do_idling(dev_priv); |
if (!obj->has_dma_mapping) |
dma_unmap_sg(&dev->pdev->dev, |
obj->pages->sgl, obj->pages->nents, |
PCI_DMA_BIDIRECTIONAL); |
// if (!obj->has_dma_mapping) |
// dma_unmap_sg(&dev->pdev->dev, |
// obj->pages->sgl, obj->pages->nents, |
// PCI_DMA_BIDIRECTIONAL); |
undo_idling(dev_priv, interruptible); |
} |
579,104 → 554,29 |
*end -= 4096; |
} |
} |
void i915_gem_setup_global_gtt(struct drm_device *dev, |
void i915_gem_init_global_gtt(struct drm_device *dev, |
unsigned long start, |
unsigned long mappable_end, |
unsigned long end) |
{ |
/* Let GEM manage all of the aperture. |
* |
* However, leave one page at the end still bound to the scratch page. |
* There are a number of places where the hardware apparently prefetches |
* past the end of the object, and we've seen multiple hangs with the |
* GPU head pointer stuck in a batchbuffer bound at the last page of the |
* aperture. One page should be enough to keep any prefetching inside |
* of the aperture. |
*/ |
drm_i915_private_t *dev_priv = dev->dev_private; |
struct drm_mm_node *entry; |
struct drm_i915_gem_object *obj; |
unsigned long hole_start, hole_end; |
BUG_ON(mappable_end > end); |
/* Subtract the guard page ... */ |
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE); |
if (!HAS_LLC(dev)) |
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust; |
/* Mark any preallocated objects as occupied */ |
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) { |
DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n", |
obj->gtt_offset, obj->base.size); |
dev_priv->mm.gtt_start = start; |
dev_priv->mm.gtt_mappable_end = mappable_end; |
dev_priv->mm.gtt_end = end; |
dev_priv->mm.gtt_total = end - start; |
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start; |
BUG_ON(obj->gtt_space != I915_GTT_RESERVED); |
obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space, |
obj->gtt_offset, |
obj->base.size, |
false); |
obj->has_global_gtt_mapping = 1; |
/* ... but ensure that we clear the entire range. */ |
i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE); |
} |
dev_priv->gtt.start = start; |
dev_priv->gtt.total = end - start; |
/* Clear any non-preallocated blocks */ |
drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space, |
hole_start, hole_end) { |
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", |
hole_start, hole_end); |
dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE, |
(hole_end-hole_start) / PAGE_SIZE); |
} |
/* And finally clear the reserved guard page */ |
dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1); |
} |
static bool |
intel_enable_ppgtt(struct drm_device *dev) |
{ |
if (i915_enable_ppgtt >= 0) |
return i915_enable_ppgtt; |
#ifdef CONFIG_INTEL_IOMMU |
/* Disable ppgtt on SNB if VT-d is on. */ |
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) |
return false; |
#endif |
return true; |
} |
void i915_gem_init_global_gtt(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
unsigned long gtt_size, mappable_size; |
gtt_size = dev_priv->gtt.total; |
mappable_size = dev_priv->gtt.mappable_end; |
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) { |
int ret; |
/* PPGTT pdes are stolen from global gtt ptes, so shrink the |
* aperture accordingly when using aliasing ppgtt. */ |
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE; |
gtt_size -= LFB_SIZE; |
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size); |
ret = i915_gem_init_aliasing_ppgtt(dev); |
if (!ret) |
return; |
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret); |
drm_mm_takedown(&dev_priv->mm.gtt_space); |
gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE; |
} |
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size); |
} |
static int setup_scratch_page(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
686,8 → 586,6 |
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); |
if (page == NULL) |
return -ENOMEM; |
get_page(page); |
set_pages_uc(page, 1); |
#ifdef CONFIG_INTEL_IOMMU |
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE, |
697,21 → 595,12 |
#else |
dma_addr = page_to_phys(page); |
#endif |
dev_priv->gtt.scratch_page = page; |
dev_priv->gtt.scratch_page_dma = dma_addr; |
dev_priv->mm.gtt->scratch_page = page; |
dev_priv->mm.gtt->scratch_page_dma = dma_addr; |
return 0; |
} |
static void teardown_scratch_page(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
set_pages_wb(dev_priv->gtt.scratch_page, 1); |
pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma, |
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
put_page(dev_priv->gtt.scratch_page); |
__free_page(dev_priv->gtt.scratch_page); |
} |
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
{ |
720,7 → 609,7 |
return snb_gmch_ctl << 20; |
} |
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) |
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl) |
{ |
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; |
snb_gmch_ctl &= SNB_GMCH_GMS_MASK; |
727,7 → 616,7 |
return snb_gmch_ctl << 25; /* 32 MB units */ |
} |
static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl) |
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl) |
{ |
static const int stolen_decoder[] = { |
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; |
736,131 → 625,92 |
return stolen_decoder[snb_gmch_ctl] << 20; |
} |
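/* Worked examples for the two decoders above: on gen6 a GMS field of 3 |
 * decodes to 3 << 25 = 96 MiB (32 MiB units); on gen7 a field of 6 |
 * indexes stolen_decoder[6] = 48, i.e. 48 MiB after the << 20. */ |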
static int gen6_gmch_probe(struct drm_device *dev, |
size_t *gtt_total, |
size_t *stolen, |
phys_addr_t *mappable_base, |
unsigned long *mappable_end) |
int i915_gem_gtt_init(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
phys_addr_t gtt_bus_addr; |
unsigned int gtt_size; |
u16 snb_gmch_ctl; |
int ret; |
*mappable_base = pci_resource_start(dev->pdev, 2); |
*mappable_end = pci_resource_len(dev->pdev, 2); |
/* 64/512MB is the current min/max we actually know of, but this is just |
* a coarse sanity check. |
/* On modern platforms we need not worry ourself with the legacy |
* hostbridge query stuff. Skip it entirely |
*/ |
if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) { |
DRM_ERROR("Unknown GMADR size (%lx)\n", |
dev_priv->gtt.mappable_end); |
return -ENXIO; |
if (INTEL_INFO(dev)->gen < 6) { |
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL); |
if (!ret) { |
DRM_ERROR("failed to set up gmch\n"); |
return -EIO; |
} |
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40))) |
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40)); |
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); |
dev_priv->mm.gtt = intel_gtt_get(); |
if (!dev_priv->mm.gtt) { |
DRM_ERROR("Failed to initialize GTT\n"); |
return -ENODEV; |
} |
return 0; |
} |
if (IS_GEN7(dev)) |
*stolen = gen7_get_stolen_size(snb_gmch_ctl); |
else |
*stolen = gen6_get_stolen_size(snb_gmch_ctl); |
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL); |
if (!dev_priv->mm.gtt) |
return -ENOMEM; |
*gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT; |
#ifdef CONFIG_INTEL_IOMMU |
dev_priv->mm.gtt->needs_dmar = 1; |
#endif |
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */ |
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20); |
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size); |
if (!dev_priv->gtt.gsm) { |
DRM_ERROR("Failed to map the gtt page table\n"); |
return -ENOMEM; |
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2); |
/* i9xx_setup */ |
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); |
dev_priv->mm.gtt->gtt_total_entries = |
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t); |
if (INTEL_INFO(dev)->gen < 7) |
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl); |
else |
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl); |
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT; |
/* 64/512MB is the current min/max we actually know of, but this is just a |
* coarse sanity check. |
*/ |
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 || |
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) { |
DRM_ERROR("Unknown GMADR entries (%d)\n", |
dev_priv->mm.gtt->gtt_mappable_entries); |
ret = -ENXIO; |
goto err_out; |
} |
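/* Each GTT entry maps one 4 KiB page, so "entries >> 8" converts an |
 * entry count to MiB: e.g. 16384 mappable entries = 64 MiB of GMADR. */ |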
ret = setup_scratch_page(dev); |
if (ret) { |
DRM_ERROR("Scratch setup failed\n"); |
dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range; |
dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries; |
return ret; |
goto err_out; |
} |
static void gen6_gmch_remove(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
iounmap(dev_priv->gtt.gsm); |
teardown_scratch_page(dev_priv->dev); |
dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr, |
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t)); |
if (!dev_priv->mm.gtt->gtt) { |
DRM_ERROR("Failed to map the gtt page table\n"); |
ret = -ENOMEM; |
goto err_out; |
} |
static int i915_gmch_probe(struct drm_device *dev, |
size_t *gtt_total, |
size_t *stolen, |
phys_addr_t *mappable_base, |
unsigned long *mappable_end) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
int ret; |
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */ |
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8); |
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8); |
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20); |
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); |
if (!ret) { |
DRM_ERROR("failed to set up gmch\n"); |
return -EIO; |
} |
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end); |
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev); |
dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range; |
dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries; |
return 0; |
} |
static void i915_gmch_remove(struct drm_device *dev) |
{ |
// intel_gmch_remove(); |
err_out: |
kfree(dev_priv->mm.gtt); |
return ret; |
} |
int i915_gem_gtt_init(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct i915_gtt *gtt = &dev_priv->gtt; |
unsigned long gtt_size; |
int ret; |
if (INTEL_INFO(dev)->gen <= 5) { |
dev_priv->gtt.gtt_probe = i915_gmch_probe; |
dev_priv->gtt.gtt_remove = i915_gmch_remove; |
} else { |
dev_priv->gtt.gtt_probe = gen6_gmch_probe; |
dev_priv->gtt.gtt_remove = gen6_gmch_remove; |
} |
ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total, |
&dev_priv->gtt.stolen_size, |
&gtt->mappable_base, |
&gtt->mappable_end); |
if (ret) |
return ret; |
gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t); |
/* GMADR is the PCI mmio aperture into the global GTT. */ |
DRM_INFO("Memory usable by graphics device = %zdM\n", |
dev_priv->gtt.total >> 20); |
DRM_DEBUG_DRIVER("GMADR size = %ldM\n", |
dev_priv->gtt.mappable_end >> 20); |
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", |
dev_priv->gtt.stolen_size >> 20); |
return 0; |
} |
struct scatterlist *sg_next(struct scatterlist *sg) |
{ |
if (sg_is_last(sg)) |
/drivers/video/drm/i915/i915_reg.h |
---|
141,15 → 141,8 |
#define VGA_MSR_MEM_EN (1<<1) |
#define VGA_MSR_CGA_MODE (1<<0) |
/* |
* SR01 is the only VGA register touched on non-UMS setups. |
* VLV doesn't do UMS, so the sequencer index/data registers |
* are the only VGA registers which need to include |
* display_mmio_offset. |
*/ |
#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4) |
#define SR01 1 |
#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5) |
#define VGA_SR_INDEX 0x3c4 |
#define VGA_SR_DATA 0x3c5 |
#define VGA_AR_INDEX 0x3c0 |
#define VGA_AR_VID_EN (1<<5) |
308,7 → 301,6 |
#define DISPLAY_PLANE_A (0<<20) |
#define DISPLAY_PLANE_B (1<<20) |
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) |
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */ |
#define PIPE_CONTROL_CS_STALL (1<<20) |
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18) |
#define PIPE_CONTROL_QW_WRITE (1<<14) |
343,10 → 335,8 |
* 0x801c/3c: core clock bits |
* 0x8048/68: low pass filter coefficients |
* 0x8100: fast clock controls |
* |
* DPIO is VLV only. |
*/ |
#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100) |
#define DPIO_PKT 0x2100 |
#define DPIO_RID (0<<24) |
#define DPIO_OP_WRITE (1<<16) |
#define DPIO_OP_READ (0<<16) |
353,9 → 343,9 |
#define DPIO_PORTID (0x12<<8) |
#define DPIO_BYTE (0xf<<4) |
#define DPIO_BUSY (1<<0) /* status only */ |
#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104) |
#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108) |
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110) |
#define DPIO_DATA 0x2104 |
#define DPIO_REG 0x2108 |
#define DPIO_CTL 0x2110 |
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */ |
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */ |
#define DPIO_SFR_BYPASS (1<<1) |
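/* The DPIO registers above form a small mailbox: load DPIO_REG (plus |
 * DPIO_DATA for a write), kick the transaction via DPIO_PKT, and poll |
 * DPIO_BUSY. A minimal read sketch under these defines; the helper name |
 * and the wait_for_atomic_us() poll are assumptions, not part of this |
 * diff: |
 */ |
static u32 dpio_sideband_read(struct drm_i915_private *dev_priv, int reg) |
{ |
/* Load the target register, then start a read transaction. */ |
I915_WRITE(DPIO_REG, reg); |
I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | |
DPIO_BYTE); |
/* DPIO_BUSY clears once the sideband transaction completes. */ |
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) { |
DRM_ERROR("DPIO read wait timed out\n"); |
return 0; |
} |
return I915_READ(DPIO_DATA); |
} |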
566,13 → 556,13 |
#define IIR 0x020a4 |
#define IMR 0x020a8 |
#define ISR 0x020ac |
#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060) |
#define VLV_GUNIT_CLOCK_GATE 0x182060 |
#define GCFG_DIS (1<<8) |
#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084) |
#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0) |
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4) |
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8) |
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac) |
#define VLV_IIR_RW 0x182084 |
#define VLV_IER 0x1820a0 |
#define VLV_IIR 0x1820a4 |
#define VLV_IMR 0x1820a8 |
#define VLV_ISR 0x1820ac |
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) |
#define I915_DISPLAY_PORT_INTERRUPT (1<<17) |
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) |
745,7 → 735,6 |
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16) |
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16) |
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */ |
#define GEN7_FF_VS_REF_CNT_FFME (1 << 15) |
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12) |
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12) |
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */ |
932,8 → 921,8 |
#define VGA1_PD_P1_DIV_2 (1 << 13) |
#define VGA1_PD_P1_SHIFT 8 |
#define VGA1_PD_P1_MASK (0x1f << 8) |
#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014) |
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018) |
#define _DPLL_A 0x06014 |
#define _DPLL_B 0x06018 |
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B) |
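/* Upstream defines _PIPE(pipe, a, b) as (a) + (pipe) * ((b) - (a)), so |
 * DPLL(0) = 0x6014 and DPLL(1) = 0x6018. */ |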
#define DPLL_VCO_ENABLE (1 << 31) |
#define DPLL_DVO_HIGH_SPEED (1 << 30) |
954,6 → 943,23 |
#define DPLL_LOCK_VLV (1<<15) |
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13) |
#define SRX_INDEX 0x3c4 |
#define SRX_DATA 0x3c5 |
#define SR01 1 |
#define SR01_SCREEN_OFF (1<<5) |
#define PPCR 0x61204 |
#define PPCR_ON (1<<0) |
#define DVOB 0x61140 |
#define DVOB_ON (1<<31) |
#define DVOC 0x61160 |
#define DVOC_ON (1<<31) |
#define LVDS 0x61180 |
#define LVDS_ON (1<<31) |
/* Scratch pad debug 0 reg: |
*/ |
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 |
/* |
* The i830 generation, in LVDS mode, defines P1 as the bit number set within |
992,7 → 998,7 |
#define SDVO_MULTIPLIER_MASK 0x000000ff |
#define SDVO_MULTIPLIER_SHIFT_HIRES 4 |
#define SDVO_MULTIPLIER_SHIFT_VGA 0 |
#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */ |
#define _DPLL_A_MD 0x0601c /* 965+ only */ |
/* |
* UDI pixel divider, controlling how many pixels are stuffed into a packet. |
* |
1029,7 → 1035,7 |
*/ |
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f |
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 |
#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */ |
#define _DPLL_B_MD 0x06020 /* 965+ only */ |
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD) |
#define _FPA0 0x06040 |
1172,7 → 1178,7 |
#define RAMCLK_GATE_D 0x6210 /* CRL only */ |
#define DEUC 0x6214 /* CRL only */ |
#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500) |
#define FW_BLC_SELF_VLV 0x6500 |
#define FW_CSPWRDWNEN (1<<15) |
/* |
1179,8 → 1185,8 |
* Palette regs |
*/ |
#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000) |
#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800) |
#define _PALETTE_A 0x0a000 |
#define _PALETTE_B 0x0a800 |
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B) |
/* MCH MMIO space */ |
1236,10 → 1242,6 |
#define MAD_DIMM_A_SIZE_SHIFT 0 |
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT) |
/** snb MCH registers for priority tuning */ |
#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10) |
#define MCH_SSKPD_WM0_MASK 0x3f |
#define MCH_SSKPD_WM0_VAL 0xc |
/* Clocking configuration register */ |
#define CLKCFG 0x10c00 |
1549,26 → 1551,26 |
*/ |
/* Pipe A timing regs */ |
#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000) |
#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004) |
#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008) |
#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c) |
#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010) |
#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014) |
#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c) |
#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020) |
#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028) |
#define _HTOTAL_A 0x60000 |
#define _HBLANK_A 0x60004 |
#define _HSYNC_A 0x60008 |
#define _VTOTAL_A 0x6000c |
#define _VBLANK_A 0x60010 |
#define _VSYNC_A 0x60014 |
#define _PIPEASRC 0x6001c |
#define _BCLRPAT_A 0x60020 |
#define _VSYNCSHIFT_A 0x60028 |
/* Pipe B timing regs */ |
#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000) |
#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004) |
#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008) |
#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c) |
#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010) |
#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014) |
#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c) |
#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020) |
#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028) |
#define _HTOTAL_B 0x61000 |
#define _HBLANK_B 0x61004 |
#define _HSYNC_B 0x61008 |
#define _VTOTAL_B 0x6100c |
#define _VBLANK_B 0x61010 |
#define _VSYNC_B 0x61014 |
#define _PIPEBSRC 0x6101c |
#define _BCLRPAT_B 0x61020 |
#define _VSYNCSHIFT_B 0x61028 |
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B) |
1613,9 → 1615,9 |
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16) |
#define ADPA_USE_VGA_HVPOLARITY (1<<15) |
#define ADPA_SETS_HVPOLARITY 0 |
#define ADPA_VSYNC_CNTL_DISABLE (1<<10) |
#define ADPA_VSYNC_CNTL_DISABLE (1<<11) |
#define ADPA_VSYNC_CNTL_ENABLE 0 |
#define ADPA_HSYNC_CNTL_DISABLE (1<<11) |
#define ADPA_HSYNC_CNTL_DISABLE (1<<10) |
#define ADPA_HSYNC_CNTL_ENABLE 0 |
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4) |
#define ADPA_VSYNC_ACTIVE_LOW 0 |
1629,10 → 1631,13 |
/* Hotplug control (945+ only) */ |
#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110) |
#define PORTB_HOTPLUG_INT_EN (1 << 29) |
#define PORTC_HOTPLUG_INT_EN (1 << 28) |
#define PORTD_HOTPLUG_INT_EN (1 << 27) |
#define PORT_HOTPLUG_EN 0x61110 |
#define HDMIB_HOTPLUG_INT_EN (1 << 29) |
#define DPB_HOTPLUG_INT_EN (1 << 29) |
#define HDMIC_HOTPLUG_INT_EN (1 << 28) |
#define DPC_HOTPLUG_INT_EN (1 << 28) |
#define HDMID_HOTPLUG_INT_EN (1 << 27) |
#define DPD_HOTPLUG_INT_EN (1 << 27) |
#define SDVOB_HOTPLUG_INT_EN (1 << 26) |
#define SDVOC_HOTPLUG_INT_EN (1 << 25) |
#define TV_HOTPLUG_INT_EN (1 << 18) |
1653,14 → 1658,21 |
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2) |
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) |
#define PORT_HOTPLUG_STAT 0x61114 |
/* HDMI/DP bits are gen4+ */ |
#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29) |
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) |
#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27) |
#define PORTD_HOTPLUG_INT_STATUS (3 << 21) |
#define PORTC_HOTPLUG_INT_STATUS (3 << 19) |
#define PORTB_HOTPLUG_INT_STATUS (3 << 17) |
#define DPB_HOTPLUG_LIVE_STATUS (1 << 29) |
#define DPC_HOTPLUG_LIVE_STATUS (1 << 28) |
#define DPD_HOTPLUG_LIVE_STATUS (1 << 27) |
#define DPD_HOTPLUG_INT_STATUS (3 << 21) |
#define DPC_HOTPLUG_INT_STATUS (3 << 19) |
#define DPB_HOTPLUG_INT_STATUS (3 << 17) |
/* HDMI bits are shared with the DP bits */ |
#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29) |
#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28) |
#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27) |
#define HDMID_HOTPLUG_INT_STATUS (3 << 21) |
#define HDMIC_HOTPLUG_INT_STATUS (3 << 19) |
#define HDMIB_HOTPLUG_INT_STATUS (3 << 17) |
/* CRT/TV common between gen3+ */ |
#define CRT_HOTPLUG_INT_STATUS (1 << 11) |
#define TV_HOTPLUG_INT_STATUS (1 << 10) |
1865,7 → 1877,7 |
#define PP_DIVISOR 0x61210 |
/* Panel fitting */ |
#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230) |
#define PFIT_CONTROL 0x61230 |
#define PFIT_ENABLE (1 << 31) |
#define PFIT_PIPE_MASK (3 << 29) |
#define PFIT_PIPE_SHIFT 29 |
1883,7 → 1895,9 |
#define PFIT_SCALING_PROGRAMMED (1 << 26) |
#define PFIT_SCALING_PILLAR (2 << 26) |
#define PFIT_SCALING_LETTER (3 << 26) |
#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234) |
#define PFIT_PGM_RATIOS 0x61234 |
#define PFIT_VERT_SCALE_MASK 0xfff00000 |
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0 |
/* Pre-965 */ |
#define PFIT_VERT_SCALE_SHIFT 20 |
#define PFIT_VERT_SCALE_MASK 0xfff00000 |
1895,7 → 1909,7 |
#define PFIT_HORIZ_SCALE_SHIFT_965 0 |
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff |
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238) |
#define PFIT_AUTO_RATIOS 0x61238 |
/* Backlight control */ |
#define BLC_PWM_CTL2 0x61250 /* 965+ only */ |
2625,10 → 2639,10 |
/* Display & cursor control */ |
/* Pipe A */ |
#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000) |
#define _PIPEADSL 0x70000 |
#define DSL_LINEMASK_GEN2 0x00000fff |
#define DSL_LINEMASK_GEN3 0x00001fff |
#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008) |
#define _PIPEACONF 0x70008 |
#define PIPECONF_ENABLE (1<<31) |
#define PIPECONF_DISABLE 0 |
#define PIPECONF_DOUBLE_WIDE (1<<30) |
2657,12 → 2671,11 |
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ |
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ |
#define PIPECONF_CXSR_DOWNCLOCK (1<<16) |
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13) |
#define PIPECONF_BPC_MASK (0x7 << 5) |
#define PIPECONF_8BPC (0<<5) |
#define PIPECONF_10BPC (1<<5) |
#define PIPECONF_6BPC (2<<5) |
#define PIPECONF_12BPC (3<<5) |
#define PIPECONF_BPP_MASK (0x000000e0) |
#define PIPECONF_BPP_8 (0<<5) |
#define PIPECONF_BPP_10 (1<<5) |
#define PIPECONF_BPP_6 (2<<5) |
#define PIPECONF_BPP_12 (3<<5) |
#define PIPECONF_DITHER_EN (1<<4) |
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c) |
#define PIPECONF_DITHER_TYPE_SP (0<<2) |
2669,7 → 2682,7 |
#define PIPECONF_DITHER_TYPE_ST1 (1<<2) |
#define PIPECONF_DITHER_TYPE_ST2 (2<<2) |
#define PIPECONF_DITHER_TYPE_TEMP (3<<2) |
#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024) |
#define _PIPEASTAT 0x70024 |
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) |
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30) |
#define PIPE_CRC_ERROR_ENABLE (1UL<<29) |
2680,7 → 2693,7 |
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) |
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) |
#define PIPE_DPST_EVENT_ENABLE (1UL<<23) |
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22) |
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26) |
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) |
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) |
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) |
2690,7 → 2703,7 |
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16) |
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) |
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15) |
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14) |
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15) |
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) |
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) |
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) |
2706,6 → 2719,11 |
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ |
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) |
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) |
#define PIPE_BPC_MASK (7 << 5) /* Ironlake */ |
#define PIPE_8BPC (0 << 5) |
#define PIPE_10BPC (1 << 5) |
#define PIPE_6BPC (2 << 5) |
#define PIPE_12BPC (3 << 5) |
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC) |
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF) |
2714,7 → 2732,7 |
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL) |
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT) |
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028) |
#define VLV_DPFLIPSTAT 0x70028 |
#define PIPEB_LINE_COMPARE_INT_EN (1<<29) |
#define PIPEB_HLINE_INT_EN (1<<28) |
#define PIPEB_VBLANK_INT_EN (1<<27) |
2728,7 → 2746,7 |
#define SPRITEA_FLIPDONE_INT_EN (1<<17) |
#define PLANEA_FLIPDONE_INT_EN (1<<16) |
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */ |
#define DPINVGTT 0x7002c /* VLV only */ |
#define CURSORB_INVALID_GTT_INT_EN (1<<23) |
#define CURSORA_INVALID_GTT_INT_EN (1<<22) |
#define SPRITED_INVALID_GTT_INT_EN (1<<21) |
2756,7 → 2774,7 |
#define DSPARB_BEND_SHIFT 9 /* on 855 */ |
#define DSPARB_AEND_SHIFT 0 |
#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034) |
#define DSPFW1 0x70034 |
#define DSPFW_SR_SHIFT 23 |
#define DSPFW_SR_MASK (0x1ff<<23) |
#define DSPFW_CURSORB_SHIFT 16 |
2764,11 → 2782,11 |
#define DSPFW_PLANEB_SHIFT 8 |
#define DSPFW_PLANEB_MASK (0x7f<<8) |
#define DSPFW_PLANEA_MASK (0x7f) |
#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038) |
#define DSPFW2 0x70038 |
#define DSPFW_CURSORA_MASK 0x00003f00 |
#define DSPFW_CURSORA_SHIFT 8 |
#define DSPFW_PLANEC_MASK (0x7f) |
#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c) |
#define DSPFW3 0x7003c |
#define DSPFW_HPLL_SR_EN (1<<31) |
#define DSPFW_CURSOR_SR_SHIFT 24 |
#define PINEVIEW_SELF_REFRESH_EN (1<<30) |
2780,13 → 2798,13 |
/* drain latency register values*/ |
#define DRAIN_LATENCY_PRECISION_32 32 |
#define DRAIN_LATENCY_PRECISION_16 16 |
#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050) |
#define VLV_DDL1 0x70050 |
#define DDL_CURSORA_PRECISION_32 (1<<31) |
#define DDL_CURSORA_PRECISION_16 (0<<31) |
#define DDL_CURSORA_SHIFT 24 |
#define DDL_PLANEA_PRECISION_32 (1<<7) |
#define DDL_PLANEA_PRECISION_16 (0<<7) |
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054) |
#define VLV_DDL2 0x70054 |
#define DDL_CURSORB_PRECISION_32 (1<<31) |
#define DDL_CURSORB_PRECISION_16 (0<<31) |
#define DDL_CURSORB_SHIFT 24 |
2930,10 → 2948,10 |
* } while (high1 != high2); |
* frame = (high1 << 8) | low1; |
*/ |
#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040) |
#define _PIPEAFRAMEHIGH 0x70040 |
#define PIPE_FRAME_HIGH_MASK 0x0000ffff |
#define PIPE_FRAME_HIGH_SHIFT 0 |
#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044) |
#define _PIPEAFRAMEPIXEL 0x70044 |
#define PIPE_FRAME_LOW_MASK 0xff000000 |
#define PIPE_FRAME_LOW_SHIFT 24 |
#define PIPE_PIXEL_MASK 0x00ffffff |
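/* The comment fragment above ends the classic race-free frame-counter |
 * read: sample the high word, then the low word, then the high word |
 * again, and retry if the high word rolled over in between. Sketch |
 * only, using the pipe-A registers defined here (I915_READ and the |
 * surrounding driver context are assumed): |
 */ |
static u32 pipe_a_frame_count(struct drm_i915_private *dev_priv) |
{ |
u32 high1, high2, low; |
do { |
high1 = I915_READ(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK; |
low = (I915_READ(_PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> |
PIPE_FRAME_LOW_SHIFT; |
high2 = I915_READ(_PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK; |
} while (high1 != high2); |
return (high1 << 8) | low; |
} |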
2944,12 → 2962,11 |
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45) |
/* Cursor A & B regs */ |
#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080) |
#define _CURACNTR 0x70080 |
/* Old style CUR*CNTR flags (desktop 8xx) */ |
#define CURSOR_ENABLE 0x80000000 |
#define CURSOR_GAMMA_ENABLE 0x40000000 |
#define CURSOR_STRIDE_MASK 0x30000000 |
#define CURSOR_PIPE_CSC_ENABLE (1<<24) |
#define CURSOR_FORMAT_SHIFT 24 |
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) |
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) |
2966,16 → 2983,16 |
#define MCURSOR_PIPE_A 0x00 |
#define MCURSOR_PIPE_B (1 << 28) |
#define MCURSOR_GAMMA_ENABLE (1 << 26) |
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) |
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) |
#define _CURABASE 0x70084 |
#define _CURAPOS 0x70088 |
#define CURSOR_POS_MASK 0x007FF |
#define CURSOR_POS_SIGN 0x8000 |
#define CURSOR_X_SHIFT 0 |
#define CURSOR_Y_SHIFT 16 |
#define CURSIZE 0x700a0 |
#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0) |
#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4) |
#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8) |
#define _CURBCNTR 0x700c0 |
#define _CURBBASE 0x700c4 |
#define _CURBPOS 0x700c8 |
#define _CURBCNTR_IVB 0x71080 |
#define _CURBBASE_IVB 0x71084 |
2990,7 → 3007,7 |
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) |
/* Display A control */ |
#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180) |
#define _DSPACNTR 0x70180 |
#define DISPLAY_PLANE_ENABLE (1<<31) |
#define DISPLAY_PLANE_DISABLE 0 |
#define DISPPLANE_GAMMA_ENABLE (1<<30) |
3011,7 → 3028,6 |
#define DISPPLANE_RGBA888 (0xf<<26) |
#define DISPPLANE_STEREO_ENABLE (1<<25) |
#define DISPPLANE_STEREO_DISABLE 0 |
#define DISPPLANE_PIPE_CSC_ENABLE (1<<24) |
#define DISPPLANE_SEL_PIPE_SHIFT 24 |
#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT) |
#define DISPPLANE_SEL_PIPE_A 0 |
3024,14 → 3040,14 |
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) |
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ |
#define DISPPLANE_TILED (1<<10) |
#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184) |
#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188) |
#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */ |
#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190) |
#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */ |
#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */ |
#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */ |
#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC) |
#define _DSPAADDR 0x70184 |
#define _DSPASTRIDE 0x70188 |
#define _DSPAPOS 0x7018C /* reserved */ |
#define _DSPASIZE 0x70190 |
#define _DSPASURF 0x7019C /* 965+ only */ |
#define _DSPATILEOFF 0x701A4 /* 965+ only */ |
#define _DSPAOFFSET 0x701A4 /* HSW */ |
#define _DSPASURFLIVE 0x701AC |
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR) |
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR) |
3052,44 → 3068,44 |
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg)))) |
/* VBIOS flags */ |
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410) |
#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414) |
#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418) |
#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c) |
#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420) |
#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424) |
#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428) |
#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410) |
#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414) |
#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420) |
#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414) |
#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418) |
#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c) |
#define SWF00 0x71410 |
#define SWF01 0x71414 |
#define SWF02 0x71418 |
#define SWF03 0x7141c |
#define SWF04 0x71420 |
#define SWF05 0x71424 |
#define SWF06 0x71428 |
#define SWF10 0x70410 |
#define SWF11 0x70414 |
#define SWF14 0x71420 |
#define SWF30 0x72414 |
#define SWF31 0x72418 |
#define SWF32 0x7241c |
/* Pipe B */ |
#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000) |
#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008) |
#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024) |
#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040) |
#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044) |
#define _PIPEBDSL 0x71000 |
#define _PIPEBCONF 0x71008 |
#define _PIPEBSTAT 0x71024 |
#define _PIPEBFRAMEHIGH 0x71040 |
#define _PIPEBFRAMEPIXEL 0x71044 |
#define _PIPEB_FRMCOUNT_GM45 0x71040 |
#define _PIPEB_FLIPCOUNT_GM45 0x71044 |
/* Display B control */ |
#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180) |
#define _DSPBCNTR 0x71180 |
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) |
#define DISPPLANE_ALPHA_TRANS_DISABLE 0 |
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 |
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) |
#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184) |
#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188) |
#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C) |
#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190) |
#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C) |
#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4) |
#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4) |
#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC) |
#define _DSPBADDR 0x71184 |
#define _DSPBSTRIDE 0x71188 |
#define _DSPBPOS 0x7118C |
#define _DSPBSIZE 0x71190 |
#define _DSPBSURF 0x7119C |
#define _DSPBTILEOFF 0x711A4 |
#define _DSPBOFFSET 0x711A4 |
#define _DSPBSURFLIVE 0x711AC |
/* Sprite A control */ |
#define _DVSACNTR 0x72180 |
3100,7 → 3116,6 |
#define DVS_FORMAT_RGBX101010 (1<<25) |
#define DVS_FORMAT_RGBX888 (2<<25) |
#define DVS_FORMAT_RGBX161616 (3<<25) |
#define DVS_PIPE_CSC_ENABLE (1<<24) |
#define DVS_SOURCE_KEY (1<<22) |
#define DVS_RGB_ORDER_XBGR (1<<20) |
#define DVS_YUV_BYTE_ORDER_MASK (3<<16) |
3168,7 → 3183,7 |
#define SPRITE_FORMAT_RGBX161616 (3<<25) |
#define SPRITE_FORMAT_YUV444 (4<<25) |
#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */ |
#define SPRITE_PIPE_CSC_ENABLE (1<<24) |
#define SPRITE_CSC_ENABLE (1<<24) |
#define SPRITE_SOURCE_KEY (1<<22) |
#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */ |
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19) |
3239,8 → 3254,6 |
# define VGA_2X_MODE (1 << 30) |
# define VGA_PIPE_B_SELECT (1 << 29) |
#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400) |
/* Ironlake */ |
#define CPU_VGACNTRL 0x41000 |
3281,41 → 3294,41 |
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff |
#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030) |
#define _PIPEA_DATA_M1 0x60030 |
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */ |
#define TU_SIZE_MASK 0x7e000000 |
#define PIPE_DATA_M1_OFFSET 0 |
#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034) |
#define _PIPEA_DATA_N1 0x60034 |
#define PIPE_DATA_N1_OFFSET 0 |
#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038) |
#define _PIPEA_DATA_M2 0x60038 |
#define PIPE_DATA_M2_OFFSET 0 |
#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c) |
#define _PIPEA_DATA_N2 0x6003c |
#define PIPE_DATA_N2_OFFSET 0 |
#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040) |
#define _PIPEA_LINK_M1 0x60040 |
#define PIPE_LINK_M1_OFFSET 0 |
#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044) |
#define _PIPEA_LINK_N1 0x60044 |
#define PIPE_LINK_N1_OFFSET 0 |
#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048) |
#define _PIPEA_LINK_M2 0x60048 |
#define PIPE_LINK_M2_OFFSET 0 |
#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c) |
#define _PIPEA_LINK_N2 0x6004c |
#define PIPE_LINK_N2_OFFSET 0 |
/* PIPEB timing regs are same start from 0x61000 */ |
#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030) |
#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034) |
#define _PIPEB_DATA_M1 0x61030 |
#define _PIPEB_DATA_N1 0x61034 |
#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038) |
#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c) |
#define _PIPEB_DATA_M2 0x61038 |
#define _PIPEB_DATA_N2 0x6103c |
#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040) |
#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044) |
#define _PIPEB_LINK_M1 0x61040 |
#define _PIPEB_LINK_N1 0x61044 |
#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048) |
#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c) |
#define _PIPEB_LINK_M2 0x61048 |
#define _PIPEB_LINK_N2 0x6104c |
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1) |
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1) |
3568,10 → 3581,9 |
#define PORTD_PULSE_DURATION_6ms (2 << 18) |
#define PORTD_PULSE_DURATION_100ms (3 << 18) |
#define PORTD_PULSE_DURATION_MASK (3 << 18) |
#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16) |
#define PORTD_HOTPLUG_NO_DETECT (0 << 16) |
#define PORTD_HOTPLUG_NO_DETECT (0) |
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) |
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16) |
#define PORTD_HOTPLUG_LONG_DETECT (1 << 17) |
#define PORTC_HOTPLUG_ENABLE (1 << 12) |
#define PORTC_PULSE_DURATION_2ms (0) |
#define PORTC_PULSE_DURATION_4_5ms (1 << 10) |
3578,10 → 3590,9 |
#define PORTC_PULSE_DURATION_6ms (2 << 10) |
#define PORTC_PULSE_DURATION_100ms (3 << 10) |
#define PORTC_PULSE_DURATION_MASK (3 << 10) |
#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8) |
#define PORTC_HOTPLUG_NO_DETECT (0 << 8) |
#define PORTC_HOTPLUG_NO_DETECT (0) |
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) |
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8) |
#define PORTC_HOTPLUG_LONG_DETECT (1 << 9) |
#define PORTB_HOTPLUG_ENABLE (1 << 4) |
#define PORTB_PULSE_DURATION_2ms (0) |
#define PORTB_PULSE_DURATION_4_5ms (1 << 2) |
3588,10 → 3599,9 |
#define PORTB_PULSE_DURATION_6ms (2 << 2) |
#define PORTB_PULSE_DURATION_100ms (3 << 2) |
#define PORTB_PULSE_DURATION_MASK (3 << 2) |
#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0) |
#define PORTB_HOTPLUG_NO_DETECT (0 << 0) |
#define PORTB_HOTPLUG_NO_DETECT (0) |
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) |
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0) |
#define PORTB_HOTPLUG_LONG_DETECT (1 << 1) |
#define PCH_GPIOA 0xc5010 |
#define PCH_GPIOB 0xc5014 |
3712,13 → 3722,13 |
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B) |
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B) |
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200) |
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208) |
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210) |
#define VLV_VIDEO_DIP_CTL_A 0x60200 |
#define VLV_VIDEO_DIP_DATA_A 0x60208 |
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210 |
#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170) |
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174) |
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178) |
#define VLV_VIDEO_DIP_CTL_B 0x61170 |
#define VLV_VIDEO_DIP_DATA_B 0x61174 |
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178 |
#define VLV_TVIDEO_DIP_CTL(pipe) \ |
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B) |
3810,6 → 3820,8 |
#define TRANS_FSYNC_DELAY_HB2 (1<<27) |
#define TRANS_FSYNC_DELAY_HB3 (2<<27) |
#define TRANS_FSYNC_DELAY_HB4 (3<<27) |
#define TRANS_DP_AUDIO_ONLY (1<<26) |
#define TRANS_DP_VIDEO_AUDIO (0<<26) |
#define TRANS_INTERLACE_MASK (7<<21) |
#define TRANS_PROGRESSIVE (0<<21) |
#define TRANS_INTERLACED (3<<21) |
3915,7 → 3927,7 |
#define FDI_10BPC (1<<16) |
#define FDI_6BPC (2<<16) |
#define FDI_12BPC (3<<16) |
#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15) |
#define FDI_LINK_REVERSE_OVERWRITE (1<<15) |
#define FDI_DMI_LINK_REVERSE_MASK (1<<14) |
#define FDI_RX_PLL_ENABLE (1<<13) |
#define FDI_FS_ERR_CORRECT_ENABLE (1<<11) |
4008,17 → 4020,17 |
#define LVDS_DETECTED (1 << 1) |
/* vlv has 2 sets of panel control regs. */ |
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200) |
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204) |
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208) |
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c) |
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210) |
#define PIPEA_PP_STATUS 0x61200 |
#define PIPEA_PP_CONTROL 0x61204 |
#define PIPEA_PP_ON_DELAYS 0x61208 |
#define PIPEA_PP_OFF_DELAYS 0x6120c |
#define PIPEA_PP_DIVISOR 0x61210 |
#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300) |
#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304) |
#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308) |
#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c) |
#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310) |
#define PIPEB_PP_STATUS 0x61300 |
#define PIPEB_PP_CONTROL 0x61304 |
#define PIPEB_PP_ON_DELAYS 0x61308 |
#define PIPEB_PP_OFF_DELAYS 0x6130c |
#define PIPEB_PP_DIVISOR 0x61310 |
#define PCH_PP_STATUS 0xc7200 |
#define PCH_PP_CONTROL 0xc7204 |
4199,9 → 4211,7 |
#define GEN6_RP_INTERRUPT_LIMITS 0xA014 |
#define GEN6_RPSTAT1 0xA01C |
#define GEN6_CAGF_SHIFT 8 |
#define HSW_CAGF_SHIFT 7 |
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) |
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) |
#define GEN6_RP_CONTROL 0xA024 |
#define GEN6_RP_MEDIA_TURBO (1<<11) |
#define GEN6_RP_MEDIA_MODE_MASK (3<<9) |
4270,8 → 4280,8 |
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9 |
#define GEN6_PCODE_WRITE_RC6VIDS 0x4 |
#define GEN6_PCODE_READ_RC6VIDS 0x5 |
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) |
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) |
#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0 |
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0) |
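/* Sanity check on the plain encode/decode pair above: |
 * GEN6_ENCODE_RC6_VID(450) = (450 - 245) / 5 = 41, and |
 * GEN6_DECODE_RC6_VID(41) = 41 * 5 + 245 = 450 mV. */ |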
#define GEN6_PCODE_DATA 0x138128 |
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 |
4312,7 → 4322,7 |
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4 |
#define DOP_CLOCK_GATING_DISABLE (1<<0) |
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020) |
#define G4X_AUD_VID_DID 0x62020 |
#define INTEL_AUDIO_DEVCL 0x808629FB |
#define INTEL_AUDIO_DEVBLC 0x80862801 |
#define INTEL_AUDIO_DEVCTG 0x80862802 |
4428,10 → 4438,10 |
#define AUDIO_CP_READY_C (1<<9) |
/* HSW Power Wells */ |
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ |
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */ |
#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */ |
#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */ |
#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */ |
#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */ |
#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */ |
#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */ |
#define HSW_PWR_WELL_ENABLE (1<<31) |
#define HSW_PWR_WELL_STATE (1<<30) |
#define HSW_PWR_WELL_CTL5 0x45410 |
4514,7 → 4524,6 |
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */ |
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */ |
#define DDI_BUF_EMP_MASK (0xf<<24) |
#define DDI_BUF_PORT_REVERSAL (1<<16) |
#define DDI_BUF_IS_IDLE (1<<7) |
#define DDI_A_4_LANES (1<<4) |
#define DDI_PORT_WIDTH_X1 (0<<1) |
4648,51 → 4657,4 |
#define WM_DBG_DISALLOW_MAXFIFO (1<<1) |
#define WM_DBG_DISALLOW_SPRITE (1<<2) |
/* pipe CSC */ |
#define _PIPE_A_CSC_COEFF_RY_GY 0x49010 |
#define _PIPE_A_CSC_COEFF_BY 0x49014 |
#define _PIPE_A_CSC_COEFF_RU_GU 0x49018 |
#define _PIPE_A_CSC_COEFF_BU 0x4901c |
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020 |
#define _PIPE_A_CSC_COEFF_BV 0x49024 |
#define _PIPE_A_CSC_MODE 0x49028 |
#define _PIPE_A_CSC_PREOFF_HI 0x49030 |
#define _PIPE_A_CSC_PREOFF_ME 0x49034 |
#define _PIPE_A_CSC_PREOFF_LO 0x49038 |
#define _PIPE_A_CSC_POSTOFF_HI 0x49040 |
#define _PIPE_A_CSC_POSTOFF_ME 0x49044 |
#define _PIPE_A_CSC_POSTOFF_LO 0x49048 |
#define _PIPE_B_CSC_COEFF_RY_GY 0x49110 |
#define _PIPE_B_CSC_COEFF_BY 0x49114 |
#define _PIPE_B_CSC_COEFF_RU_GU 0x49118 |
#define _PIPE_B_CSC_COEFF_BU 0x4911c |
#define _PIPE_B_CSC_COEFF_RV_GV 0x49120 |
#define _PIPE_B_CSC_COEFF_BV 0x49124 |
#define _PIPE_B_CSC_MODE 0x49128 |
#define _PIPE_B_CSC_PREOFF_HI 0x49130 |
#define _PIPE_B_CSC_PREOFF_ME 0x49134 |
#define _PIPE_B_CSC_PREOFF_LO 0x49138 |
#define _PIPE_B_CSC_POSTOFF_HI 0x49140 |
#define _PIPE_B_CSC_POSTOFF_ME 0x49144 |
#define _PIPE_B_CSC_POSTOFF_LO 0x49148 |
#define CSC_BLACK_SCREEN_OFFSET (1 << 2) |
#define CSC_POSITION_BEFORE_GAMMA (1 << 1) |
#define CSC_MODE_YUV_TO_RGB (1 << 0) |
#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY) |
#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY) |
#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU) |
#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU) |
#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV) |
#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV) |
#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE) |
#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI) |
#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME) |
#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO) |
#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI) |
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME) |
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO) |
#endif /* _I915_REG_H_ */ |
/drivers/video/drm/i915/intel_crt.c |
---|
44,9 → 44,6 |
struct intel_crt { |
struct intel_encoder base; |
/* DPMS state is stored in the connector, which we need in the |
* encoder's enable/disable callbacks */ |
struct intel_connector *connector; |
bool force_hotplug_required; |
u32 adpa_reg; |
}; |
83,6 → 80,29 |
return true; |
} |
static void intel_disable_crt(struct intel_encoder *encoder) |
{ |
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
struct intel_crt *crt = intel_encoder_to_crt(encoder); |
u32 temp; |
temp = I915_READ(crt->adpa_reg); |
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); |
temp &= ~ADPA_DAC_ENABLE; |
I915_WRITE(crt->adpa_reg, temp); |
} |
static void intel_enable_crt(struct intel_encoder *encoder) |
{ |
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; |
struct intel_crt *crt = intel_encoder_to_crt(encoder); |
u32 temp; |
temp = I915_READ(crt->adpa_reg); |
temp |= ADPA_DAC_ENABLE; |
I915_WRITE(crt->adpa_reg, temp); |
} |
/* Note: The caller is required to filter out dpms modes not supported by the |
* platform. */ |
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) |
114,19 → 134,6 |
I915_WRITE(crt->adpa_reg, temp); |
} |
static void intel_disable_crt(struct intel_encoder *encoder) |
{ |
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF); |
} |
static void intel_enable_crt(struct intel_encoder *encoder) |
{ |
struct intel_crt *crt = intel_encoder_to_crt(encoder); |
intel_crt_set_dpms(encoder, crt->connector->base.dpms); |
} |
static void intel_crt_dpms(struct drm_connector *connector, int mode) |
{ |
struct drm_device *dev = connector->dev; |
252,8 → 259,6 |
u32 adpa; |
bool ret; |
ENTER(); |
/* The first time through, trigger an explicit detection cycle */ |
if (crt->force_hotplug_required) { |
bool turn_off_dac = HAS_PCH_SPLIT(dev); |
261,7 → 266,7 |
crt->force_hotplug_required = 0; |
save_adpa = adpa = I915_READ(crt->adpa_reg); |
save_adpa = adpa = I915_READ(PCH_ADPA); |
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); |
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; |
268,20 → 273,20 |
if (turn_off_dac) |
adpa &= ~ADPA_DAC_ENABLE; |
I915_WRITE(crt->adpa_reg, adpa); |
I915_WRITE(PCH_ADPA, adpa); |
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
1000)) |
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); |
if (turn_off_dac) { |
I915_WRITE(crt->adpa_reg, save_adpa); |
POSTING_READ(crt->adpa_reg); |
I915_WRITE(PCH_ADPA, save_adpa); |
POSTING_READ(PCH_ADPA); |
} |
} |
/* Check the status to see if both blue and green are on now */ |
adpa = I915_READ(crt->adpa_reg); |
adpa = I915_READ(PCH_ADPA); |
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) |
ret = true; |
else |
288,8 → 293,6 |
ret = false; |
DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret); |
LEAVE(); |
return ret; |
} |
296,29 → 299,26 |
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) |
{ |
struct drm_device *dev = connector->dev; |
struct intel_crt *crt = intel_attached_crt(connector); |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 adpa; |
bool ret; |
u32 save_adpa; |
ENTER(); |
save_adpa = adpa = I915_READ(crt->adpa_reg); |
save_adpa = adpa = I915_READ(ADPA); |
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); |
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER; |
I915_WRITE(crt->adpa_reg, adpa); |
I915_WRITE(ADPA, adpa); |
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, |
1000)) { |
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); |
I915_WRITE(crt->adpa_reg, save_adpa); |
I915_WRITE(ADPA, save_adpa); |
} |
/* Check the status to see if both blue and green are on now */ |
adpa = I915_READ(crt->adpa_reg); |
adpa = I915_READ(ADPA); |
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0) |
ret = true; |
else |
329,8 → 329,6 |
/* FIXME: debug force function and remove */ |
ret = true; |
LEAVE(); |
return ret; |
} |
350,8 → 348,6 |
bool ret = false; |
int i, tries = 0; |
ENTER(); |
if (HAS_PCH_SPLIT(dev)) |
return intel_ironlake_crt_detect_hotplug(connector); |
390,8 → 386,6 |
/* and put the bits back */ |
I915_WRITE(PORT_HOTPLUG_EN, orig); |
LEAVE(); |
return ret; |
} |
400,8 → 394,6 |
{ |
struct edid *edid; |
ENTER(); |
edid = drm_get_edid(connector, i2c); |
if (!edid && !intel_gmbus_is_forced_bit(i2c)) { |
411,8 → 403,6 |
intel_gmbus_force_bit(i2c, false); |
} |
LEAVE(); |
return edid; |
} |
674,11 → 664,11 |
if (HAS_PCH_SPLIT(dev)) { |
u32 adpa; |
adpa = I915_READ(crt->adpa_reg); |
adpa = I915_READ(PCH_ADPA); |
adpa &= ~ADPA_CRT_HOTPLUG_MASK; |
adpa |= ADPA_HOTPLUG_BITS; |
I915_WRITE(crt->adpa_reg, adpa); |
POSTING_READ(crt->adpa_reg); |
I915_WRITE(PCH_ADPA, adpa); |
POSTING_READ(PCH_ADPA); |
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); |
crt->force_hotplug_required = 1; |
693,6 → 683,7 |
static const struct drm_encoder_helper_funcs crt_encoder_funcs = { |
.mode_fixup = intel_crt_mode_fixup, |
.mode_set = intel_crt_mode_set, |
.disable = intel_encoder_noop, |
}; |
static const struct drm_connector_funcs intel_crt_connector_funcs = { |
732,7 → 723,6 |
} |
connector = &intel_connector->base; |
crt->connector = intel_connector; |
drm_connector_init(dev, &intel_connector->base, |
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
763,7 → 753,7 |
crt->base.disable = intel_disable_crt; |
crt->base.enable = intel_enable_crt; |
if (HAS_DDI(dev)) |
if (IS_HASWELL(dev)) |
crt->base.get_hw_state = intel_ddi_get_hw_state; |
else |
crt->base.get_hw_state = intel_crt_get_hw_state; |
787,14 → 777,10 |
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; |
/* |
* TODO: find a proper way to discover whether we need to set the |
* polarity and link reversal bits or not, instead of relying on the |
* BIOS. |
* TODO: find a proper way to discover whether we need to set the |
* polarity reversal bit or not, instead of relying on the BIOS. |
*/ |
if (HAS_PCH_LPT(dev)) { |
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | |
FDI_RX_LINK_REVERSAL_OVERRIDE; |
dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; |
if (HAS_PCH_LPT(dev)) |
dev_priv->fdi_rx_polarity_reversed = |
!!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT); |
} |
} |
/drivers/video/drm/i915/intel_ddi.c |
---|
84,8 → 84,7 |
* in either FDI or DP modes only, as HDMI connections will work with both |
* of those |
*/ |
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, |
bool use_fdi_mode) |
void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 reg; |
115,18 → 114,17 |
{ |
int port; |
if (!HAS_DDI(dev)) |
return; |
if (IS_HASWELL(dev)) { |
for (port = PORT_A; port < PORT_E; port++) |
intel_prepare_ddi_buffers(dev, port, false); |
/* DDI E is the suggested one to work in FDI mode, so program is as such |
* by default. It will have to be re-programmed in case a digital DP |
* output will be detected on it |
/* DDI E is the suggested one to work in FDI mode, so program is as such by |
* default. It will have to be re-programmed in case a digital DP output |
* will be detected on it |
*/ |
intel_prepare_ddi_buffers(dev, PORT_E, true); |
} |
} |
static const long hsw_ddi_buf_ctl_values[] = { |
DDI_BUF_EMP_400MV_0DB_HSW, |
180,8 → 178,10 |
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); |
/* Enable the PCH Receiver FDI PLL */ |
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | |
FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19); |
rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE | |
((intel_crtc->fdi_lanes - 1) << 19); |
if (dev_priv->fdi_rx_polarity_reversed) |
rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT; |
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); |
POSTING_READ(_FDI_RXA_CTL); |
udelay(220); |
203,10 → 203,7 |
DP_TP_CTL_LINK_TRAIN_PAT1 | |
DP_TP_CTL_ENABLE); |
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage. |
* DDI E does not support port reversal, the functionality is |
* achieved on the PCH side in FDI_RX_CTL, so no need to set the |
* port reversal bit */ |
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */ |
I915_WRITE(DDI_BUF_CTL(PORT_E), |
DDI_BUF_CTL_ENABLE | |
((intel_crtc->fdi_lanes - 1) << 1) | |
678,14 → 675,10 |
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n", |
port_name(port), pipe_name(pipe)); |
intel_crtc->eld_vld = false; |
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
struct intel_digital_port *intel_dig_port = |
enc_to_dig_port(encoder); |
intel_dp->DP = intel_dig_port->port_reversal | |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; |
intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW; |
switch (intel_dp->lane_count) { |
case 1: |
intel_dp->DP |= DDI_PORT_WIDTH_X1; |
992,13 → 985,7 |
if (cpu_transcoder == TRANSCODER_EDP) { |
switch (pipe) { |
case PIPE_A: |
/* Can only use the always-on power well for eDP when |
* not using the panel fitter, and when not using motion |
* blur mitigation (which we don't support). */ |
if (dev_priv->pch_pf_size) |
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF; |
else |
temp |= TRANS_DDI_EDP_INPUT_A_ON; |
break; |
case PIPE_B: |
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF; |
1082,7 → 1069,7 |
if (port == PORT_A) |
cpu_transcoder = TRANSCODER_EDP; |
else |
cpu_transcoder = (enum transcoder) pipe; |
cpu_transcoder = pipe; |
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); |
1298,48 → 1285,28 |
static void intel_enable_ddi(struct intel_encoder *intel_encoder) |
{ |
struct drm_encoder *encoder = &intel_encoder->base; |
struct drm_crtc *crtc = encoder->crtc; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int pipe = intel_crtc->pipe; |
struct drm_device *dev = encoder->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
enum port port = intel_ddi_get_encoder_port(intel_encoder); |
int type = intel_encoder->type; |
uint32_t tmp; |
if (type == INTEL_OUTPUT_HDMI) { |
struct intel_digital_port *intel_dig_port = |
enc_to_dig_port(encoder); |
/* In HDMI/DVI mode, the port width, and swing/emphasis values |
* are ignored so nothing special needs to be done besides |
* enabling the port. |
*/ |
I915_WRITE(DDI_BUF_CTL(port), |
intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE); |
I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE); |
} else if (type == INTEL_OUTPUT_EDP) { |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
ironlake_edp_backlight_on(intel_dp); |
} |
if (intel_crtc->eld_vld) { |
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); |
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); |
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); |
} |
} |
static void intel_disable_ddi(struct intel_encoder *intel_encoder) |
{ |
struct drm_encoder *encoder = &intel_encoder->base; |
struct drm_crtc *crtc = encoder->crtc; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int pipe = intel_crtc->pipe; |
int type = intel_encoder->type; |
struct drm_device *dev = encoder->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t tmp; |
if (type == INTEL_OUTPUT_EDP) { |
struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1346,10 → 1313,6 |
ironlake_edp_backlight_off(intel_dp); |
} |
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); |
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4)); |
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); |
} |
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv) |
1391,8 → 1354,8 |
struct intel_dp *intel_dp = &intel_dig_port->dp; |
struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
enum port port = intel_dig_port->port; |
uint32_t val; |
bool wait = false; |
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { |
val = I915_READ(DDI_BUF_CTL(port)); |
1489,11 → 1452,11 |
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = { |
.mode_fixup = intel_ddi_mode_fixup, |
.mode_set = intel_ddi_mode_set, |
.disable = intel_encoder_noop, |
}; |
void intel_ddi_init(struct drm_device *dev, enum port port) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_digital_port *intel_dig_port; |
struct intel_encoder *intel_encoder; |
struct drm_encoder *encoder; |
1534,8 → 1497,6 |
intel_encoder->get_hw_state = intel_ddi_get_hw_state; |
intel_dig_port->port = port; |
intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) & |
DDI_BUF_PORT_REVERSAL; |
if (hdmi_connector) |
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port); |
else |
/drivers/video/drm/i915/intel_dp.c |
---|
148,6 → 148,15 |
return max_link_bw; |
} |
static int |
intel_dp_link_clock(uint8_t link_bw) |
{ |
if (link_bw == DP_LINK_BW_2_7) |
return 270000; |
else |
return 162000; |
} |
/* |
* The units on the numbers in the next two are... bizarre. Examples will |
* make it clearer; this one parallels an example in the eDP spec. |
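 */ |
/* The two helpers this comment introduces are elided by the hunk; for |
 * reference, a reconstruction of upstream's intel_dp_link_required() |
 * and intel_dp_max_data_rate() -- treat the bodies as a sketch, not as |
 * part of this diff. With 8b/10b coding each lane carries 8/10 of the |
 * link symbol rate, and both sides of the check use the same units: |
 */ |
static int intel_dp_link_required(int pixel_clock, int bpp) |
{ |
return (pixel_clock * bpp + 9) / 10;	/* round up */ |
} |
static int intel_dp_max_data_rate(int max_link_clock, int max_lanes) |
{ |
return (max_link_clock * max_lanes * 8) / 10; /* 8b/10b overhead */ |
} |
/* e.g. DP_LINK_BW_2_7 -> 270000; x 4 lanes -> 864000, while a |
 * 1920x1200@60 mode at 24 bpp needs (193250 * 24 + 9) / 10 = 463800, |
 * so that mode fits on the link. */ |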
182,8 → 191,7 |
struct drm_display_mode *mode, |
bool adjust_mode) |
{ |
int max_link_clock = |
drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp)); |
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd); |
int max_rate, mode_rate; |
322,49 → 330,6 |
} |
} |
static uint32_t |
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t ch_ctl = intel_dp->output_reg + 0x10; |
uint32_t status; |
bool done; |
if (IS_HASWELL(dev)) { |
switch (intel_dig_port->port) { |
case PORT_A: |
ch_ctl = DPA_AUX_CH_CTL; |
break; |
case PORT_B: |
ch_ctl = PCH_DPB_AUX_CH_CTL; |
break; |
case PORT_C: |
ch_ctl = PCH_DPC_AUX_CH_CTL; |
break; |
case PORT_D: |
ch_ctl = PCH_DPD_AUX_CH_CTL; |
break; |
default: |
BUG(); |
} |
} |
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) |
if (has_aux_irq) |
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C, |
msecs_to_jiffies(10)); |
else |
done = wait_for_atomic(C, 10) == 0; |
if (!done) |
DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n", |
has_aux_irq); |
#undef C |
return status; |
} |
static int |
intel_dp_aux_ch(struct intel_dp *intel_dp, |
uint8_t *send, int send_bytes, |
376,18 → 341,12 |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t ch_ctl = output_reg + 0x10; |
uint32_t ch_data = ch_ctl + 4; |
int i, ret, recv_bytes; |
int i; |
int recv_bytes; |
uint32_t status; |
uint32_t aux_clock_divider; |
int try, precharge; |
bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev); |
/* dp aux is extremely sensitive to irq latency, hence request the |
* lowest possible wakeup latency and so prevent the cpu from going into |
* deep sleep states. |
*/ |
// pm_qos_update_request(&dev_priv->pm_qos, 0); |
if (IS_HASWELL(dev)) { |
switch (intel_dig_port->port) { |
case PORT_A: |
420,7 → 379,7 |
* clock divider. |
*/ |
if (is_cpu_edp(intel_dp)) { |
if (HAS_DDI(dev)) |
if (IS_HASWELL(dev)) |
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1; |
else if (IS_VALLEYVIEW(dev)) |
aux_clock_divider = 100; |
440,7 → 399,7 |
/* Try to wait for any previous AUX channel activity */ |
for (try = 0; try < 3; try++) { |
status = I915_READ_NOTRACE(ch_ctl); |
status = I915_READ(ch_ctl); |
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) |
break; |
msleep(1); |
449,8 → 408,7 |
if (try == 3) { |
WARN(1, "dp_aux_ch not started status 0x%08x\n", |
I915_READ(ch_ctl)); |
ret = -EBUSY; |
goto out; |
return -EBUSY; |
} |
/* Must try at least 3 times according to DP spec */ |
463,7 → 421,6 |
/* Send the command and wait for it to complete */ |
I915_WRITE(ch_ctl, |
DP_AUX_CH_CTL_SEND_BUSY | |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) | |
DP_AUX_CH_CTL_TIME_OUT_400us | |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) | |
471,9 → 428,13 |
DP_AUX_CH_CTL_DONE | |
DP_AUX_CH_CTL_TIME_OUT_ERROR | |
DP_AUX_CH_CTL_RECEIVE_ERROR); |
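/* Poll for completion: spin until the hw clears SEND_BUSY, waiting |
 * 100us between reads (no AUX-done interrupt is used here). */ |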
for (;;) { |
status = I915_READ(ch_ctl); |
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0) |
break; |
udelay(100); |
} |
status = intel_dp_aux_wait_done(intel_dp, has_aux_irq); |
/* Clear done status and any errors */ |
I915_WRITE(ch_ctl, |
status | |
490,8 → 451,7 |
if ((status & DP_AUX_CH_CTL_DONE) == 0) { |
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status); |
ret = -EBUSY; |
goto out; |
return -EBUSY; |
} |
/* Check for timeout or receive error. |
499,8 → 459,7 |
*/ |
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) { |
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status); |
ret = -EIO; |
goto out; |
return -EIO; |
} |
/* Timeouts occur when the device isn't connected, so they're |
507,8 → 466,7 |
* "normal" -- don't fill the kernel log with these */ |
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) { |
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status); |
ret = -ETIMEDOUT; |
goto out; |
return -ETIMEDOUT; |
} |
/* Unload any bytes sent back from the other side */ |
521,11 → 479,7 |
unpack_aux(I915_READ(ch_data + i), |
recv + i, recv_bytes - i); |
ret = recv_bytes; |
out: |
// pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); |
return ret; |
return recv_bytes; |
} |
/* Write data to the aux channel in native mode */ |
764,35 → 718,16 |
return false; |
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24; |
if (intel_dp->color_range_auto) { |
/* |
* See: |
* CEA-861-E - 5.1 Default Encoding Parameters |
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry |
*/ |
if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1) |
intel_dp->color_range = DP_COLOR_RANGE_16_235; |
else |
intel_dp->color_range = 0; |
} |
if (intel_dp->color_range) |
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; |
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); |
for (clock = 0; clock <= max_clock; clock++) { |
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
int link_bw_clock = |
drm_dp_bw_code_to_link_rate(bws[clock]); |
int link_avail = intel_dp_max_data_rate(link_bw_clock, |
lane_count); |
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
if (mode_rate <= link_avail) { |
intel_dp->link_bw = bws[clock]; |
intel_dp->lane_count = lane_count; |
adjusted_mode->clock = link_bw_clock; |
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); |
DRM_DEBUG_KMS("DP link bw %02x lane " |
"count %d clock %d bpp %d\n", |
intel_dp->link_bw, intel_dp->lane_count, |
807,6 → 742,39 |
return false; |
} |
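/* Worked example for the loop above (editor's sketch, assuming the usual |
 * 8b/10b helpers: intel_dp_link_required(clock, bpp) ~ clock * bpp / 10 and |
 * intel_dp_max_data_rate(link_clock, lanes) ~ link_clock * lanes * 8 / 10): |
 * a 148500 kHz mode at 24 bpp needs mode_rate = 148500 * 24 / 10 = 356400, |
 * while a 270000 kHz link with 4 lanes offers 270000 * 4 * 8 / 10 = 864000, |
 * so the first clock/lane combination with mode_rate <= link_avail wins. */ |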
struct intel_dp_m_n { |
uint32_t tu; |
uint32_t gmch_m; |
uint32_t gmch_n; |
uint32_t link_m; |
uint32_t link_n; |
}; |
static void |
intel_reduce_ratio(uint32_t *num, uint32_t *den) |
{ |
while (*num > 0xffffff || *den > 0xffffff) { |
*num >>= 1; |
*den >>= 1; |
} |
} |
static void |
intel_dp_compute_m_n(int bpp, |
int nlanes, |
int pixel_clock, |
int link_clock, |
struct intel_dp_m_n *m_n) |
{ |
m_n->tu = 64; |
m_n->gmch_m = (pixel_clock * bpp) >> 3; |
m_n->gmch_n = link_clock * nlanes; |
intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
m_n->link_m = pixel_clock; |
m_n->link_n = link_clock; |
intel_reduce_ratio(&m_n->link_m, &m_n->link_n); |
} |
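/* Worked example (illustrative numbers): for bpp = 24, nlanes = 4 and |
 * pixel_clock = 148500 kHz on a 270000 kHz link: |
 *   gmch_m = (148500 * 24) >> 3 = 445500 |
 *   gmch_n = 270000 * 4         = 1080000 |
 * Both already fit in 24 bits, so intel_reduce_ratio() leaves them |
 * unchanged; link_m/link_n end up as 148500/270000. */ |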
void |
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
817,10 → 785,9 |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
int lane_count = 4; |
struct intel_link_m_n m_n; |
struct intel_dp_m_n m_n; |
int pipe = intel_crtc->pipe; |
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder; |
int target_clock; |
/* |
* Find the lane count in the intel_encoder private |
836,22 → 803,13 |
} |
} |
target_clock = mode->clock; |
for_each_encoder_on_crtc(dev, crtc, intel_encoder) { |
if (intel_encoder->type == INTEL_OUTPUT_EDP) { |
target_clock = intel_edp_target_clock(intel_encoder, |
mode); |
break; |
} |
} |
/* |
* Compute the GMCH and Link ratios. The '3' here is |
* the number of bytes_per_pixel post-LUT, which we always |
* set up for 8-bits of R/G/B, or 3 bytes total. |
*/ |
intel_link_compute_m_n(intel_crtc->bpp, lane_count, |
target_clock, adjusted_mode->clock, &m_n); |
intel_dp_compute_m_n(intel_crtc->bpp, lane_count, |
mode->clock, adjusted_mode->clock, &m_n); |
if (IS_HASWELL(dev)) { |
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), |
893,32 → 851,6 |
} |
} |
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 dpa_ctl; |
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); |
dpa_ctl = I915_READ(DP_A); |
dpa_ctl &= ~DP_PLL_FREQ_MASK; |
if (clock < 200000) { |
/* For a long time we've carried around a ILK-DevA w/a for the |
* 160MHz clock. If we're really unlucky, it's still required. |
*/ |
DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n"); |
dpa_ctl |= DP_PLL_FREQ_160MHZ; |
} else { |
dpa_ctl |= DP_PLL_FREQ_270MHZ; |
} |
I915_WRITE(DP_A, dpa_ctl); |
POSTING_READ(DP_A); |
udelay(500); |
} |
static void |
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
994,7 → 926,6 |
else |
intel_dp->DP |= DP_PLL_FREQ_270MHZ; |
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { |
if (!HAS_PCH_SPLIT(dev)) |
intel_dp->DP |= intel_dp->color_range; |
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) |
1019,9 → 950,6 |
} else { |
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
} |
if (is_cpu_edp(intel_dp)) |
ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
} |
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) |
1129,8 → 1057,6 |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 pp; |
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { |
pp = ironlake_get_pp_control(dev_priv); |
pp &= ~EDP_FORCE_VDD; |
1617,7 → 1543,7 |
} |
static uint32_t |
intel_gen4_signal_levels(uint8_t train_set) |
intel_dp_signal_levels(uint8_t train_set) |
{ |
uint32_t signal_levels = 0; |
1715,7 → 1641,7 |
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */ |
static uint32_t |
intel_hsw_signal_levels(uint8_t train_set) |
intel_dp_signal_levels_hsw(uint8_t train_set) |
{ |
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK | |
DP_TRAIN_PRE_EMPHASIS_MASK); |
1747,34 → 1673,6 |
} |
} |
/* Properly updates "DP" with the correct signal levels. */ |
static void |
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP) |
{ |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
uint32_t signal_levels, mask; |
uint8_t train_set = intel_dp->train_set[0]; |
if (IS_HASWELL(dev)) { |
signal_levels = intel_hsw_signal_levels(train_set); |
mask = DDI_BUF_EMP_MASK; |
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { |
signal_levels = intel_gen7_edp_signal_levels(train_set); |
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB; |
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { |
signal_levels = intel_gen6_edp_signal_levels(train_set); |
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB; |
} else { |
signal_levels = intel_gen4_signal_levels(train_set); |
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK; |
} |
DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels); |
*DP = (*DP & ~mask) | signal_levels; |
} |
static bool |
intel_dp_set_link_train(struct intel_dp *intel_dp, |
uint32_t dp_reg_value, |
1798,8 → 1696,6 |
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { |
case DP_TRAINING_PATTERN_DISABLE: |
if (port != PORT_A) { |
temp |= DP_TP_CTL_LINK_TRAIN_IDLE; |
I915_WRITE(DP_TP_CTL(port), temp); |
1808,8 → 1704,6 |
DRM_ERROR("Timed out waiting for DP idle patterns\n"); |
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; |
} |
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; |
break; |
1897,7 → 1791,7 |
int voltage_tries, loop_tries; |
uint32_t DP = intel_dp->DP; |
if (HAS_DDI(dev)) |
if (IS_HASWELL(dev)) |
intel_ddi_prepare_link_retrain(encoder); |
/* Write the link configuration data */ |
1915,8 → 1809,24 |
for (;;) { |
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
uint8_t link_status[DP_LINK_STATUS_SIZE]; |
uint32_t signal_levels; |
intel_dp_set_signal_levels(intel_dp, &DP); |
if (IS_HASWELL(dev)) { |
signal_levels = intel_dp_signal_levels_hsw( |
intel_dp->train_set[0]); |
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; |
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { |
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); |
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; |
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { |
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
} else { |
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); |
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
} |
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", |
signal_levels); |
/* Set training pattern 1 */ |
if (!intel_dp_set_link_train(intel_dp, DP, |
1940,7 → 1850,7 |
for (i = 0; i < intel_dp->lane_count; i++) |
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
break; |
if (i == intel_dp->lane_count) { |
if (i == intel_dp->lane_count && voltage_tries == 5) { |
++loop_tries; |
if (loop_tries == 5) { |
DRM_DEBUG_KMS("too many full retries, give up\n"); |
1972,6 → 1882,7 |
void |
intel_dp_complete_link_train(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
bool channel_eq = false; |
int tries, cr_tries; |
uint32_t DP = intel_dp->DP; |
1981,6 → 1892,8 |
cr_tries = 0; |
channel_eq = false; |
for (;;) { |
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
uint32_t signal_levels; |
uint8_t link_status[DP_LINK_STATUS_SIZE]; |
if (cr_tries > 5) { |
1989,7 → 1902,19 |
break; |
} |
intel_dp_set_signal_levels(intel_dp, &DP); |
if (IS_HASWELL(dev)) { |
signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]); |
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels; |
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) { |
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]); |
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels; |
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { |
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
} else { |
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); |
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
} |
/* channel eq pattern */ |
if (!intel_dp_set_link_train(intel_dp, DP, |
2039,8 → 1964,6 |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
struct drm_device *dev = intel_dig_port->base.base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = |
to_intel_crtc(intel_dig_port->base.base.crtc); |
uint32_t DP = intel_dp->DP; |
/* |
2058,7 → 1981,7 |
* intel_ddi_prepare_link_retrain will take care of redoing the link |
* train. |
*/ |
if (HAS_DDI(dev)) |
if (IS_HASWELL(dev)) |
return; |
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)) |
2075,8 → 1998,7 |
} |
POSTING_READ(intel_dp->output_reg); |
/* We don't really know why we're doing this */ |
intel_wait_for_vblank(dev, intel_crtc->pipe); |
msleep(17); |
if (HAS_PCH_IBX(dev) && |
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { |
2096,14 → 2018,19 |
/* Changes to enable or select take place the vblank |
* after being written. |
*/ |
if (WARN_ON(crtc == NULL)) { |
/* We should never try to disable a port without a crtc |
* attached. For paranoia keep the code around for a |
* bit. */ |
if (crtc == NULL) { |
/* We can arrive here never having been attached |
* to a CRTC, for instance, due to inheriting |
* random state from the BIOS. |
* |
* If the pipe is not running, play safe and |
* wait for the clocks to stabilise before |
* continuing. |
*/ |
POSTING_READ(intel_dp->output_reg); |
msleep(50); |
} else |
intel_wait_for_vblank(dev, intel_crtc->pipe); |
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe); |
} |
DP &= ~DP_AUDIO_OUTPUT_ENABLE; |
2115,16 → 2042,10 |
static bool |
intel_dp_get_dpcd(struct intel_dp *intel_dp) |
{ |
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; |
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd, |
sizeof(intel_dp->dpcd)) == 0) |
return false; /* aux transfer failed */ |
hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), |
32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); |
DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); |
if (intel_dp->dpcd[DP_DPCD_REV] == 0) |
return false; /* DPCD not present */ |
2285,8 → 2206,6 |
ironlake_dp_detect(struct intel_dp *intel_dp) |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
enum drm_connector_status status; |
/* Can't disconnect eDP, but you can close the lid... */ |
2297,9 → 2216,6 |
return status; |
} |
if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) |
return connector_status_disconnected; |
return intel_dp_detect_dpcd(intel_dp); |
} |
2308,18 → 2224,17 |
{ |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); |
uint32_t bit; |
switch (intel_dig_port->port) { |
case PORT_B: |
bit = PORTB_HOTPLUG_LIVE_STATUS; |
switch (intel_dp->output_reg) { |
case DP_B: |
bit = DPB_HOTPLUG_LIVE_STATUS; |
break; |
case PORT_C: |
bit = PORTC_HOTPLUG_LIVE_STATUS; |
case DP_C: |
bit = DPC_HOTPLUG_LIVE_STATUS; |
break; |
case PORT_D: |
bit = PORTD_HOTPLUG_LIVE_STATUS; |
case DP_D: |
bit = DPD_HOTPLUG_LIVE_STATUS; |
break; |
default: |
return connector_status_unknown; |
2375,6 → 2290,13 |
return intel_ddc_get_modes(connector, adapter); |
} |
/** |
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. |
* |
* \return true if DP port is connected. |
* \return false if DP port is disconnected. |
*/ |
static enum drm_connector_status |
intel_dp_detect(struct drm_connector *connector, bool force) |
{ |
2384,6 → 2306,7 |
struct drm_device *dev = connector->dev; |
enum drm_connector_status status; |
struct edid *edid = NULL; |
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3]; |
intel_dp->has_audio = false; |
2392,6 → 2315,10 |
else |
status = g4x_dp_detect(intel_dp); |
// hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd), |
// 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); |
// DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); |
if (status != connector_status_connected) |
return status; |
2469,7 → 2396,7 |
ret = drm_object_property_set_value(&connector->base, property, val); |
if (ret) |
return ret; |
#if 0 |
if (property == dev_priv->force_audio_property) { |
int i = val; |
bool has_audio; |
2492,23 → 2419,13 |
} |
if (property == dev_priv->broadcast_rgb_property) { |
switch (val) { |
case INTEL_BROADCAST_RGB_AUTO: |
intel_dp->color_range_auto = true; |
break; |
case INTEL_BROADCAST_RGB_FULL: |
intel_dp->color_range_auto = false; |
intel_dp->color_range = 0; |
break; |
case INTEL_BROADCAST_RGB_LIMITED: |
intel_dp->color_range_auto = false; |
intel_dp->color_range = DP_COLOR_RANGE_16_235; |
break; |
default: |
return -EINVAL; |
} |
if (val == !!intel_dp->color_range) |
return 0; |
intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; |
goto done; |
} |
#endif |
if (is_edp(intel_dp) && |
property == connector->dev->mode_config.scaling_mode_property) { |
2529,8 → 2446,11 |
return -EINVAL; |
done: |
if (intel_encoder->base.crtc) |
intel_crtc_restore_mode(intel_encoder->base.crtc); |
if (intel_encoder->base.crtc) { |
struct drm_crtc *crtc = intel_encoder->base.crtc; |
intel_set_mode(crtc, &crtc->mode, |
crtc->x, crtc->y, crtc->fb); |
} |
return 0; |
} |
2559,15 → 2479,12 |
{ |
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
struct intel_dp *intel_dp = &intel_dig_port->dp; |
struct drm_device *dev = intel_dp_to_dev(intel_dp); |
i2c_del_adapter(&intel_dp->adapter); |
drm_encoder_cleanup(encoder); |
if (is_edp(intel_dp)) { |
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work); |
mutex_lock(&dev->mode_config.mutex); |
ironlake_panel_vdd_off_sync(intel_dp); |
mutex_unlock(&dev->mode_config.mutex); |
} |
kfree(intel_dig_port); |
} |
2575,6 → 2492,7 |
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { |
.mode_fixup = intel_dp_mode_fixup, |
.mode_set = intel_dp_mode_set, |
.disable = intel_encoder_noop, |
}; |
static const struct drm_connector_funcs intel_dp_connector_funcs = { |
2649,7 → 2567,6 |
intel_attach_force_audio_property(connector); |
intel_attach_broadcast_rgb_property(connector); |
intel_dp->color_range_auto = true; |
if (is_edp(intel_dp)) { |
drm_mode_create_scaling_mode_property(connector->dev); |
2839,7 → 2756,7 |
intel_connector_attach_encoder(intel_connector, intel_encoder); |
drm_sysfs_connector_add(connector); |
if (HAS_DDI(dev)) |
if (IS_HASWELL(dev)) |
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
else |
intel_connector->get_hw_state = intel_connector_get_hw_state; |
2851,15 → 2768,15 |
name = "DPDDC-A"; |
break; |
case PORT_B: |
dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS; |
dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; |
name = "DPDDC-B"; |
break; |
case PORT_C: |
dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS; |
dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; |
name = "DPDDC-C"; |
break; |
case PORT_D: |
dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS; |
dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; |
name = "DPDDC-D"; |
break; |
default: |
/drivers/video/drm/i915/intel_drv.h |
---|
118,11 → 118,6 |
* timings in the mode to prevent the crtc fixup from overwriting them. |
* Currently only lvds needs that. */ |
#define INTEL_MODE_CRTC_TIMINGS_SET (0x20) |
/* |
* Set when limited 16-235 (as opposed to full 0-255) RGB color range is |
* to be used. |
*/ |
#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40) |
static inline void |
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode, |
167,7 → 162,6 |
bool cloneable; |
bool connectors_active; |
void (*hot_plug)(struct intel_encoder *); |
void (*pre_pll_enable)(struct intel_encoder *); |
void (*pre_enable)(struct intel_encoder *); |
void (*enable)(struct intel_encoder *); |
void (*disable)(struct intel_encoder *); |
220,7 → 214,6 |
* some outputs connected to this crtc. |
*/ |
bool active; |
bool eld_vld; |
bool primary_disabled; /* is the crtc obscured by a plane? */ |
bool lowfreq_avail; |
struct intel_overlay *overlay; |
244,9 → 237,6 |
/* We can share PLLs across outputs if the timings match */ |
struct intel_pch_pll *pch_pll; |
uint32_t ddi_pll_sel; |
/* reset counter value when the last flip was submitted */ |
unsigned int reset_counter; |
}; |
struct intel_plane { |
302,9 → 292,6 |
#define DIP_LEN_AVI 13 |
#define DIP_AVI_PR_1 0 |
#define DIP_AVI_PR_2 1 |
#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2) |
#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2) |
#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2) |
#define DIP_TYPE_SPD 0x83 |
#define DIP_VERSION_SPD 0x1 |
359,11 → 346,9 |
u32 sdvox_reg; |
int ddc_bus; |
uint32_t color_range; |
bool color_range_auto; |
bool has_hdmi_sink; |
bool has_audio; |
enum hdmi_force_audio force_audio; |
bool rgb_quant_range_selectable; |
void (*write_infoframe)(struct drm_encoder *encoder, |
struct dip_infoframe *frame); |
void (*set_infoframes)(struct drm_encoder *encoder, |
380,7 → 365,6 |
bool has_audio; |
enum hdmi_force_audio force_audio; |
uint32_t color_range; |
bool color_range_auto; |
uint8_t link_bw; |
uint8_t lane_count; |
uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
402,7 → 386,6 |
struct intel_digital_port { |
struct intel_encoder base; |
enum port port; |
u32 port_reversal; |
struct intel_dp dp; |
struct intel_hdmi hdmi; |
}; |
465,10 → 448,10 |
extern void intel_dvo_init(struct drm_device *dev); |
extern void intel_tv_init(struct drm_device *dev); |
extern void intel_mark_busy(struct drm_device *dev); |
extern void intel_mark_idle(struct drm_device *dev); |
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj); |
extern void intel_mark_idle(struct drm_device *dev); |
extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj); |
extern bool intel_lvds_init(struct drm_device *dev); |
extern bool intel_is_dual_link_lvds(struct drm_device *dev); |
extern void intel_dp_init(struct drm_device *dev, int output_reg, |
enum port port); |
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, |
528,12 → 511,12 |
bool mode_changed; |
}; |
extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, |
extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, |
int x, int y, struct drm_framebuffer *old_fb); |
extern void intel_modeset_disable(struct drm_device *dev); |
extern void intel_crtc_restore_mode(struct drm_crtc *crtc); |
extern void intel_crtc_load_lut(struct drm_crtc *crtc); |
extern void intel_crtc_update_dpms(struct drm_crtc *crtc); |
extern void intel_encoder_noop(struct drm_encoder *encoder); |
extern void intel_encoder_destroy(struct drm_encoder *encoder); |
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode); |
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder); |
572,9 → 555,6 |
return container_of(intel_hdmi, struct intel_digital_port, hdmi); |
} |
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv, |
struct intel_digital_port *port); |
extern void intel_connector_attach_encoder(struct intel_connector *connector, |
struct intel_encoder *encoder); |
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector); |
618,7 → 598,6 |
struct drm_mode_fb_cmd2 *mode_cmd, |
struct drm_i915_gem_object *obj); |
extern int intel_fbdev_init(struct drm_device *dev); |
extern void intel_fbdev_initial_config(struct drm_device *dev); |
extern void intel_fbdev_fini(struct drm_device *dev); |
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state); |
extern void intel_prepare_page_flip(struct drm_device *dev, int plane); |
657,8 → 636,7 |
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe, |
struct drm_display_mode *mode); |
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y, |
unsigned int tiling_mode, |
extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y, |
unsigned int bpp, |
unsigned int pitch); |
679,8 → 657,7 |
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv); |
extern void intel_gpu_ips_teardown(void); |
extern void intel_init_power_well(struct drm_device *dev); |
extern void intel_set_power_well(struct drm_device *dev, bool enable); |
extern void intel_init_power_wells(struct drm_device *dev); |
extern void intel_enable_gt_powersave(struct drm_device *dev); |
extern void intel_disable_gt_powersave(struct drm_device *dev); |
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv); |
/drivers/video/drm/i915/intel_hdmi.c |
---|
48,7 → 48,7 |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t enabled_bits; |
enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; |
enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; |
WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits, |
"HDMI port enabled, expecting disabled\n"); |
331,7 → 331,6 |
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, |
struct drm_display_mode *adjusted_mode) |
{ |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
struct dip_infoframe avi_if = { |
.type = DIP_TYPE_AVI, |
.ver = DIP_VERSION_AVI, |
341,15 → 340,8 |
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) |
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2; |
if (intel_hdmi->rgb_quant_range_selectable) { |
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) |
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; |
else |
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; |
} |
avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode); |
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode); |
intel_set_infoframe(encoder, &avi_if); |
} |
372,8 → 364,7 |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
u32 reg = VIDEO_DIP_CTL; |
u32 val = I915_READ(reg); |
u32 port; |
400,11 → 391,11 |
return; |
} |
switch (intel_dig_port->port) { |
case PORT_B: |
switch (intel_hdmi->sdvox_reg) { |
case SDVOB: |
port = VIDEO_DIP_PORT_B; |
break; |
case PORT_C: |
case SDVOC: |
port = VIDEO_DIP_PORT_C; |
break; |
default: |
437,8 → 428,7 |
{ |
struct drm_i915_private *dev_priv = encoder->dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); |
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); |
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe); |
u32 val = I915_READ(reg); |
u32 port; |
457,14 → 447,14 |
return; |
} |
switch (intel_dig_port->port) { |
case PORT_B: |
switch (intel_hdmi->sdvox_reg) { |
case HDMIB: |
port = VIDEO_DIP_PORT_B; |
break; |
case PORT_C: |
case HDMIC: |
port = VIDEO_DIP_PORT_C; |
break; |
case PORT_D: |
case HDMID: |
port = VIDEO_DIP_PORT_D; |
break; |
default: |
776,38 → 766,46 |
const struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); |
if (intel_hdmi->color_range_auto) { |
/* See CEA-861-E - 5.1 Default Encoding Parameters */ |
if (intel_hdmi->has_hdmi_sink && |
drm_match_cea_mode(adjusted_mode) > 1) |
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; |
else |
intel_hdmi->color_range = 0; |
return true; |
} |
if (intel_hdmi->color_range) |
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; |
return true; |
} |
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi) |
{ |
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t bit; |
switch (intel_hdmi->sdvox_reg) { |
case SDVOB: |
bit = HDMIB_HOTPLUG_LIVE_STATUS; |
break; |
case SDVOC: |
bit = HDMIC_HOTPLUG_LIVE_STATUS; |
break; |
default: |
bit = 0; |
break; |
} |
return I915_READ(PORT_HOTPLUG_STAT) & bit; |
} |
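/* The live-status bits in PORT_HOTPLUG_STAT report whether a sink is |
 * physically attached, so the detect path below can skip the EDID probe |
 * on G4x when the port reads disconnected. */ |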
static enum drm_connector_status |
intel_hdmi_detect(struct drm_connector *connector, bool force) |
{ |
struct drm_device *dev = connector->dev; |
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); |
struct intel_digital_port *intel_dig_port = |
hdmi_to_dig_port(intel_hdmi); |
struct intel_encoder *intel_encoder = &intel_dig_port->base; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_private *dev_priv = connector->dev->dev_private; |
struct edid *edid; |
enum drm_connector_status status = connector_status_disconnected; |
if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi)) |
return status; |
intel_hdmi->has_hdmi_sink = false; |
intel_hdmi->has_audio = false; |
intel_hdmi->rgb_quant_range_selectable = false; |
edid = drm_get_edid(connector, |
intel_gmbus_get_adapter(dev_priv, |
intel_hdmi->ddc_bus)); |
819,8 → 817,6 |
intel_hdmi->has_hdmi_sink = |
drm_detect_hdmi_monitor(edid); |
intel_hdmi->has_audio = drm_detect_monitor_audio(edid); |
intel_hdmi->rgb_quant_range_selectable = |
drm_rgb_quant_range_selectable(edid); |
} |
kfree(edid); |
} |
883,7 → 879,7 |
ret = drm_object_property_set_value(&connector->base, property, val); |
if (ret) |
return ret; |
#if 0 |
if (property == dev_priv->force_audio_property) { |
enum hdmi_force_audio i = val; |
bool has_audio; |
904,23 → 900,13 |
intel_hdmi->has_audio = has_audio; |
goto done; |
} |
#endif |
if (property == dev_priv->broadcast_rgb_property) { |
switch (val) { |
case INTEL_BROADCAST_RGB_AUTO: |
intel_hdmi->color_range_auto = true; |
break; |
case INTEL_BROADCAST_RGB_FULL: |
intel_hdmi->color_range_auto = false; |
intel_hdmi->color_range = 0; |
break; |
case INTEL_BROADCAST_RGB_LIMITED: |
intel_hdmi->color_range_auto = false; |
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235; |
break; |
default: |
return -EINVAL; |
} |
if (val == !!intel_hdmi->color_range) |
return 0; |
intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; |
goto done; |
} |
927,8 → 913,11 |
return -EINVAL; |
done: |
if (intel_dig_port->base.base.crtc) |
intel_crtc_restore_mode(intel_dig_port->base.base.crtc); |
if (intel_dig_port->base.base.crtc) { |
struct drm_crtc *crtc = intel_dig_port->base.base.crtc; |
intel_set_mode(crtc, &crtc->mode, |
crtc->x, crtc->y, crtc->fb); |
} |
return 0; |
} |
943,6 → 932,7 |
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = { |
.mode_fixup = intel_hdmi_mode_fixup, |
.mode_set = intel_hdmi_mode_set, |
.disable = intel_encoder_noop, |
}; |
static const struct drm_connector_funcs intel_hdmi_connector_funcs = { |
968,7 → 958,6 |
{ |
intel_attach_force_audio_property(connector); |
intel_attach_broadcast_rgb_property(connector); |
intel_hdmi->color_range_auto = true; |
} |
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, |
992,15 → 981,15 |
switch (port) { |
case PORT_B: |
intel_hdmi->ddc_bus = GMBUS_PORT_DPB; |
dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS; |
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; |
break; |
case PORT_C: |
intel_hdmi->ddc_bus = GMBUS_PORT_DPC; |
dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS; |
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; |
break; |
case PORT_D: |
intel_hdmi->ddc_bus = GMBUS_PORT_DPD; |
dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS; |
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; |
break; |
case PORT_A: |
/* Internal port only for eDP. */ |
1025,7 → 1014,7 |
intel_hdmi->set_infoframes = cpt_set_infoframes; |
} |
if (HAS_DDI(dev)) |
if (IS_HASWELL(dev)) |
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; |
else |
intel_connector->get_hw_state = intel_connector_get_hw_state; |
/drivers/video/drm/i915/intel_i2c.c |
---|
63,7 → 63,6 |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0); |
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0); |
} |
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable) |
203,78 → 202,7 |
algo->data = bus; |
} |
/* |
* gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI |
* mode. This results in spurious interrupt warnings if the legacy irq no. is |
* shared with another device. The kernel then disables that interrupt source |
* and so prevents the other device from working properly. |
*/ |
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) |
static int |
gmbus_wait_hw_status(struct drm_i915_private *dev_priv, |
u32 gmbus2_status, |
u32 gmbus4_irq_en) |
{ |
int i; |
int reg_offset = dev_priv->gpio_mmio_base; |
u32 gmbus2 = 0; |
DEFINE_WAIT(wait); |
if (!HAS_GMBUS_IRQ(dev_priv->dev)) |
gmbus4_irq_en = 0; |
/* Important: The hw handles only the first bit, so set only one! Since |
* we also need to check for NAKs besides the hw ready/idle signal, we |
* need to wake up periodically and check that ourselves. */ |
I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en); |
for (i = 0; i < msecs_to_jiffies(50) + 1; i++) { |
prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait, |
TASK_UNINTERRUPTIBLE); |
gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset); |
if (gmbus2 & (GMBUS_SATOER | gmbus2_status)) |
break; |
schedule_timeout(1); |
} |
finish_wait(&dev_priv->gmbus_wait_queue, &wait); |
I915_WRITE(GMBUS4 + reg_offset, 0); |
if (gmbus2 & GMBUS_SATOER) |
return -ENXIO; |
if (gmbus2 & gmbus2_status) |
return 0; |
return -ETIMEDOUT; |
} |
static int |
gmbus_wait_idle(struct drm_i915_private *dev_priv) |
{ |
int ret; |
int reg_offset = dev_priv->gpio_mmio_base; |
#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0) |
if (!HAS_GMBUS_IRQ(dev_priv->dev)) |
return wait_for(C, 10); |
/* Important: The hw handles only the first bit, so set only one! */ |
I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN); |
ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10); |
I915_WRITE(GMBUS4 + reg_offset, 0); |
if (ret) |
return 0; |
else |
return -ETIMEDOUT; |
#undef C |
} |
static int |
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg, |
u32 gmbus1_index) |
{ |
291,11 → 219,15 |
while (len) { |
int ret; |
u32 val, loop = 0; |
u32 gmbus2; |
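/* Poll GMBUS2 for up to 50ms: proceed once the hw flags another word |
 * ready (GMBUS_HW_RDY); a NAK (GMBUS_SATOER) yields -ENXIO, a timeout |
 * -ETIMEDOUT. */ |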
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, |
GMBUS_HW_RDY_EN); |
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & |
(GMBUS_SATOER | GMBUS_HW_RDY), |
50); |
if (ret) |
return ret; |
return -ETIMEDOUT; |
if (gmbus2 & GMBUS_SATOER) |
return -ENXIO; |
val = I915_READ(GMBUS3 + reg_offset); |
do { |
329,6 → 261,7 |
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); |
while (len) { |
int ret; |
u32 gmbus2; |
val = loop = 0; |
do { |
337,10 → 270,13 |
I915_WRITE(GMBUS3 + reg_offset, val); |
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY, |
GMBUS_HW_RDY_EN); |
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & |
(GMBUS_SATOER | GMBUS_HW_RDY), |
50); |
if (ret) |
return ret; |
return -ETIMEDOUT; |
if (gmbus2 & GMBUS_SATOER) |
return -ENXIO; |
} |
return 0; |
} |
409,6 → 345,8 |
I915_WRITE(GMBUS0 + reg_offset, bus->reg0); |
for (i = 0; i < num; i++) { |
u32 gmbus2; |
if (gmbus_is_index_read(msgs, i, num)) { |
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]); |
i += 1; /* set i to the index of the read xfer */ |
423,12 → 361,13 |
if (ret == -ENXIO) |
goto clear_err; |
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE, |
GMBUS_HW_WAIT_EN); |
if (ret == -ENXIO) |
goto clear_err; |
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) & |
(GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), |
50); |
if (ret) |
goto timeout; |
if (gmbus2 & GMBUS_SATOER) |
goto clear_err; |
} |
/* Generate a STOP condition on the bus. Note that gmbus can't generate |
441,7 → 380,8 |
* We will re-enable it at the start of the next xfer, |
* till then let it sleep. |
*/ |
if (gmbus_wait_idle(dev_priv)) { |
if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, |
10)) { |
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n", |
adapter->name); |
ret = -ETIMEDOUT; |
465,7 → 405,8 |
* it's slow responding and only answers on the 2nd retry. |
*/ |
ret = -ENXIO; |
if (gmbus_wait_idle(dev_priv)) { |
if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, |
10)) { |
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n", |
adapter->name); |
ret = -ETIMEDOUT; |
524,13 → 465,10 |
if (HAS_PCH_SPLIT(dev)) |
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA; |
else if (IS_VALLEYVIEW(dev)) |
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE; |
else |
dev_priv->gpio_mmio_base = 0; |
mutex_init(&dev_priv->gmbus_mutex); |
init_waitqueue_head(&dev_priv->gmbus_wait_queue); |
for (i = 0; i < GMBUS_NUM_PORTS; i++) { |
struct intel_gmbus *bus = &dev_priv->gmbus[i]; |
/drivers/video/drm/i915/intel_lvds.c |
---|
51,8 → 51,7 |
u32 pfit_control; |
u32 pfit_pgm_ratios; |
bool is_dual_link; |
u32 reg; |
bool pfit_dirty; |
struct intel_lvds_connector *attached_connector; |
}; |
72,11 → 71,16 |
{ |
struct drm_device *dev = encoder->base.dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
u32 tmp; |
u32 lvds_reg, tmp; |
tmp = I915_READ(lvds_encoder->reg); |
if (HAS_PCH_SPLIT(dev)) { |
lvds_reg = PCH_LVDS; |
} else { |
lvds_reg = LVDS; |
} |
tmp = I915_READ(lvds_reg); |
if (!(tmp & LVDS_PORT_EN)) |
return false; |
88,77 → 92,30 |
return true; |
} |
/* The LVDS pin pair needs to be on before the DPLLs are enabled. |
* This is an exception to the general rule that mode_set doesn't turn |
* things on. |
*/ |
/** |
* Sets the power state for the panel. |
*/ |
static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder) |
static void intel_enable_lvds(struct intel_encoder *encoder) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
struct drm_device *dev = encoder->base.dev; |
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
struct drm_display_mode *fixed_mode = |
lvds_encoder->attached_connector->base.panel.fixed_mode; |
int pipe = intel_crtc->pipe; |
u32 temp; |
u32 ctl_reg, lvds_reg, stat_reg; |
temp = I915_READ(lvds_encoder->reg); |
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
if (HAS_PCH_CPT(dev)) { |
temp &= ~PORT_TRANS_SEL_MASK; |
temp |= PORT_TRANS_SEL_CPT(pipe); |
if (HAS_PCH_SPLIT(dev)) { |
ctl_reg = PCH_PP_CONTROL; |
lvds_reg = PCH_LVDS; |
stat_reg = PCH_PP_STATUS; |
} else { |
if (pipe == 1) { |
temp |= LVDS_PIPEB_SELECT; |
} else { |
temp &= ~LVDS_PIPEB_SELECT; |
ctl_reg = PP_CONTROL; |
lvds_reg = LVDS; |
stat_reg = PP_STATUS; |
} |
} |
/* set the corresponding LVDS_BORDER bit */ |
temp |= dev_priv->lvds_border_bits; |
/* Set the B0-B3 data pairs corresponding to whether we're going to |
* set the DPLLs for dual-channel mode or not. |
*/ |
if (lvds_encoder->is_dual_link) |
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
else |
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); |
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) |
* appropriately here, but we need to look more thoroughly into how |
* panels behave in the two modes. |
*/ |
/* Set the dithering flag on LVDS as needed, note that there is no |
* special lvds dither control bit on pch-split platforms, dithering is |
* only controlled through the PIPECONF reg. */ |
if (INTEL_INFO(dev)->gen == 4) { |
if (dev_priv->lvds_dither) |
temp |= LVDS_ENABLE_DITHER; |
else |
temp &= ~LVDS_ENABLE_DITHER; |
} |
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); |
if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC) |
temp |= LVDS_HSYNC_POLARITY; |
if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC) |
temp |= LVDS_VSYNC_POLARITY; |
I915_WRITE(lvds_encoder->reg, temp); |
} |
static void intel_pre_enable_lvds(struct intel_encoder *encoder) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base); |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (HAS_PCH_SPLIT(dev) || !enc->pfit_control) |
return; |
if (lvds_encoder->pfit_dirty) { |
/* |
* Enable automatic panel scaling so that non-native modes |
* fill the screen. The panel fitter should only be |
166,36 → 123,16 |
* register description and PRM. |
*/ |
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", |
enc->pfit_control, |
enc->pfit_pgm_ratios); |
lvds_encoder->pfit_control, |
lvds_encoder->pfit_pgm_ratios); |
I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios); |
I915_WRITE(PFIT_CONTROL, enc->pfit_control); |
I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios); |
I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control); |
lvds_encoder->pfit_dirty = false; |
} |
/** |
* Sets the power state for the panel. |
*/ |
static void intel_enable_lvds(struct intel_encoder *encoder) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 ctl_reg, stat_reg; |
if (HAS_PCH_SPLIT(dev)) { |
ctl_reg = PCH_PP_CONTROL; |
stat_reg = PCH_PP_STATUS; |
} else { |
ctl_reg = PP_CONTROL; |
stat_reg = PP_STATUS; |
} |
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN); |
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); |
POSTING_READ(lvds_encoder->reg); |
POSTING_READ(lvds_reg); |
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) |
DRM_ERROR("timed out waiting for panel to power on\n"); |
207,13 → 144,15 |
struct drm_device *dev = encoder->base.dev; |
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); |
struct drm_i915_private *dev_priv = dev->dev_private; |
u32 ctl_reg, stat_reg; |
u32 ctl_reg, lvds_reg, stat_reg; |
if (HAS_PCH_SPLIT(dev)) { |
ctl_reg = PCH_PP_CONTROL; |
lvds_reg = PCH_LVDS; |
stat_reg = PCH_PP_STATUS; |
} else { |
ctl_reg = PP_CONTROL; |
lvds_reg = LVDS; |
stat_reg = PP_STATUS; |
} |
223,10 → 162,15 |
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) |
DRM_ERROR("timed out waiting for panel to power off\n"); |
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); |
POSTING_READ(lvds_encoder->reg); |
if (lvds_encoder->pfit_control) { |
I915_WRITE(PFIT_CONTROL, 0); |
lvds_encoder->pfit_dirty = true; |
} |
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); |
POSTING_READ(lvds_reg); |
} |
static int intel_lvds_mode_valid(struct drm_connector *connector, |
struct drm_display_mode *mode) |
{ |
462,6 → 406,7 |
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) { |
lvds_encoder->pfit_control = pfit_control; |
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios; |
lvds_encoder->pfit_dirty = true; |
} |
dev_priv->lvds_border_bits = border; |
548,14 → 493,13 |
#if 0 |
/* |
* Lid events. Note the use of 'modeset': |
* - we set it to MODESET_ON_LID_OPEN on lid close, |
* and set it to MODESET_DONE on open |
* Lid events. Note the use of 'modeset_on_lid': |
* - we set it on lid close, and reset it on open |
* - we use it as a "only once" bit (ie we ignore |
* duplicate events where it was already properly set) |
* - the suspend/resume paths will set it to |
* MODESET_SUSPENDED and ignore the lid open event, |
* because they restore the mode ("lid open"). |
* duplicate events where it was already properly |
* set/reset) |
* - the suspend/resume paths will also set it to |
* zero, since they restore the mode ("lid open"). |
*/ |
static int intel_lid_notify(struct notifier_block *nb, unsigned long val, |
void *unused) |
569,9 → 513,6 |
if (dev->switch_power_state != DRM_SWITCH_POWER_ON) |
return NOTIFY_OK; |
mutex_lock(&dev_priv->modeset_restore_lock); |
if (dev_priv->modeset_restore == MODESET_SUSPENDED) |
goto exit; |
/* |
* check and update the status of LVDS connector after receiving |
* the LID notification event. |
580,24 → 521,21 |
/* Don't force modeset on machines where it causes a GPU lockup */ |
if (dmi_check_system(intel_no_modeset_on_lid)) |
goto exit; |
return NOTIFY_OK; |
if (!acpi_lid_open()) { |
/* do modeset on next lid open event */ |
dev_priv->modeset_restore = MODESET_ON_LID_OPEN; |
goto exit; |
dev_priv->modeset_on_lid = 1; |
return NOTIFY_OK; |
} |
if (dev_priv->modeset_restore == MODESET_DONE) |
goto exit; |
if (!dev_priv->modeset_on_lid) |
return NOTIFY_OK; |
drm_modeset_lock_all(dev); |
dev_priv->modeset_on_lid = 0; |
mutex_lock(&dev->mode_config.mutex); |
intel_modeset_setup_hw_state(dev, true); |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
dev_priv->modeset_restore = MODESET_DONE; |
exit: |
mutex_unlock(&dev_priv->modeset_restore_lock); |
return NOTIFY_OK; |
} |
#endif |
653,7 → 591,8 |
* If the CRTC is enabled, the display will be changed |
* according to the new panel fitting mode. |
*/ |
intel_crtc_restore_mode(crtc); |
intel_set_mode(crtc, &crtc->mode, |
crtc->x, crtc->y, crtc->fb); |
} |
} |
663,6 → 602,7 |
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = { |
.mode_fixup = intel_lvds_mode_fixup, |
.mode_set = intel_lvds_mode_set, |
.disable = intel_encoder_noop, |
}; |
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { |
955,53 → 895,6 |
return false; |
} |
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id) |
{ |
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident); |
return 1; |
} |
bool intel_is_dual_link_lvds(struct drm_device *dev) |
{ |
struct intel_encoder *encoder; |
struct intel_lvds_encoder *lvds_encoder; |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
base.head) { |
if (encoder->type == INTEL_OUTPUT_LVDS) { |
lvds_encoder = to_lvds_encoder(&encoder->base); |
return lvds_encoder->is_dual_link; |
} |
} |
return false; |
} |
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) |
{ |
struct drm_device *dev = lvds_encoder->base.base.dev; |
unsigned int val; |
struct drm_i915_private *dev_priv = dev->dev_private; |
/* use the module option value if specified */ |
if (i915_lvds_channel_mode > 0) |
return i915_lvds_channel_mode == 2; |
// if (dmi_check_system(intel_dual_link_lvds)) |
// return true; |
/* BIOS should set the proper LVDS register value at boot, but |
* in reality, it doesn't set the value when the lid is closed; |
* we need to check "the value to be set" in VBT when LVDS |
* register is uninitialized. |
*/ |
val = I915_READ(lvds_encoder->reg); |
if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED))) |
val = dev_priv->bios_lvds_val; |
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; |
} |
static bool intel_lvds_supported(struct drm_device *dev) |
{ |
/* With the introduction of the PCH we gained a dedicated |
1087,8 → 980,6 |
DRM_MODE_ENCODER_LVDS); |
intel_encoder->enable = intel_enable_lvds; |
intel_encoder->pre_enable = intel_pre_enable_lvds; |
intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds; |
intel_encoder->disable = intel_disable_lvds; |
intel_encoder->get_hw_state = intel_lvds_get_hw_state; |
intel_connector->get_hw_state = intel_connector_get_hw_state; |
1110,12 → 1001,6 |
connector->interlace_allowed = false; |
connector->doublescan_allowed = false; |
if (HAS_PCH_SPLIT(dev)) { |
lvds_encoder->reg = PCH_LVDS; |
} else { |
lvds_encoder->reg = LVDS; |
} |
/* create the scaling mode property */ |
drm_mode_create_scaling_mode_property(dev); |
drm_object_attach_property(&connector->base, |
1216,10 → 1101,6 |
goto failed; |
out: |
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); |
DRM_DEBUG_KMS("detected %s-link lvds configuration\n", |
lvds_encoder->is_dual_link ? "dual" : "single"); |
/* |
* Unlock registers and just |
* leave them unlocked |
/drivers/video/drm/i915/intel_modes.c |
---|
28,6 → 28,7 |
#include <linux/fb.h> |
#include <drm/drm_edid.h> |
#include <drm/drmP.h> |
#include <drm/drm_edid.h> |
#include "intel_drv.h" |
#include "i915_drv.h" |
84,7 → 85,7 |
struct drm_device *dev = connector->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_property *prop; |
#if 0 |
prop = dev_priv->force_audio_property; |
if (prop == NULL) { |
prop = drm_property_create_enum(dev, 0, |
97,12 → 98,12 |
dev_priv->force_audio_property = prop; |
} |
drm_object_attach_property(&connector->base, prop, 0); |
#endif |
} |
static const struct drm_prop_enum_list broadcast_rgb_names[] = { |
{ INTEL_BROADCAST_RGB_AUTO, "Automatic" }, |
{ INTEL_BROADCAST_RGB_FULL, "Full" }, |
{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" }, |
{ 0, "Full" }, |
{ 1, "Limited 16:235" }, |
}; |
void |
111,7 → 112,7 |
struct drm_device *dev = connector->dev; |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_property *prop; |
#if 0 |
prop = dev_priv->broadcast_rgb_property; |
if (prop == NULL) { |
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, |
125,4 → 126,5 |
} |
drm_object_attach_property(&connector->base, prop, 0); |
#endif |
} |
/drivers/video/drm/i915/intel_pm.c |
---|
470,6 → 470,12 |
dev_priv->no_fbc_reason = FBC_MODULE_PARAM; |
goto out_disable; |
} |
if (intel_fb->obj->base.size > dev_priv->cfb_size) { |
DRM_DEBUG_KMS("framebuffer too large, disabling " |
"compression\n"); |
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
goto out_disable; |
} |
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || |
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { |
DRM_DEBUG_KMS("mode incompatible with compression, " |
503,14 → 509,6 |
if (in_dbg_master()) |
goto out_disable; |
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) { |
DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size); |
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); |
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n"); |
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
goto out_disable; |
} |
/* If the scanout has not changed, don't modify the FBC settings. |
* Note that we make the fundamental assumption that the fb->obj |
* cannot be unpinned (and have its GTT offset and fence revoked) |
558,7 → 556,6 |
DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); |
intel_disable_fbc(dev); |
} |
i915_gem_stolen_cleanup_compression(dev); |
} |
static void i915_pineview_get_mem_freq(struct drm_device *dev) |
2312,6 → 2309,7 |
i915_gem_object_unpin(ctx); |
err_unref: |
drm_gem_object_unreference(&ctx->base); |
mutex_unlock(&dev->struct_mutex); |
return NULL; |
} |
2597,7 → 2595,7 |
I915_WRITE(GEN6_RC_SLEEP, 0); |
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); |
I915_WRITE(GEN6_RC6_THRESHOLD, 50000); |
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); |
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); |
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ |
/* Check if we are enabling RC6 */ |
3467,7 → 3465,6 |
ironlake_disable_rc6(dev); |
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) { |
gen6_disable_rps(dev); |
mutex_unlock(&dev_priv->rps.hw_lock); |
} |
} |
3593,19 → 3590,6 |
} |
} |
static void gen6_check_mch_setup(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
uint32_t tmp; |
tmp = I915_READ(MCH_SSKPD); |
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) { |
DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp); |
DRM_INFO("This can cause pipe underruns and display issues.\n"); |
DRM_INFO("Please upgrade your BIOS to fix this.\n"); |
} |
} |
static void gen6_init_clock_gating(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
3698,8 → 3682,6 |
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI)); |
cpt_init_clock_gating(dev); |
gen6_check_mch_setup(dev); |
} |
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv) |
3711,10 → 3693,6 |
reg |= GEN7_FF_VS_SCHED_HW; |
reg |= GEN7_FF_DS_SCHED_HW; |
/* WaVSRefCountFullforceMissDisable */ |
if (IS_HASWELL(dev_priv->dev)) |
reg &= ~GEN7_FF_VS_REF_CNT_FFME; |
I915_WRITE(GEN7_FF_THREAD_MODE, reg); |
} |
3885,8 → 3863,6 |
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); |
cpt_init_clock_gating(dev); |
gen6_check_mch_setup(dev); |
} |
static void valleyview_init_clock_gating(struct drm_device *dev) |
4080,57 → 4056,35 |
dev_priv->display.init_clock_gating(dev); |
} |
void intel_set_power_well(struct drm_device *dev, bool enable) |
/* Starting with Haswell, we have different power wells for |
* different parts of the GPU. This attempts to enable them all. |
*/ |
void intel_init_power_wells(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
bool is_enabled, enable_requested; |
uint32_t tmp; |
unsigned long power_wells[] = { |
HSW_PWR_WELL_CTL1, |
HSW_PWR_WELL_CTL2, |
HSW_PWR_WELL_CTL4 |
}; |
int i; |
if (!IS_HASWELL(dev)) |
return; |
tmp = I915_READ(HSW_PWR_WELL_DRIVER); |
is_enabled = tmp & HSW_PWR_WELL_STATE; |
enable_requested = tmp & HSW_PWR_WELL_ENABLE; |
mutex_lock(&dev->struct_mutex); |
if (enable) { |
if (!enable_requested) |
I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE); |
for (i = 0; i < ARRAY_SIZE(power_wells); i++) { |
int well = I915_READ(power_wells[i]); |
if (!is_enabled) { |
DRM_DEBUG_KMS("Enabling power well\n"); |
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & |
HSW_PWR_WELL_STATE), 20)) |
DRM_ERROR("Timeout enabling power well\n"); |
if ((well & HSW_PWR_WELL_STATE) == 0) { |
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE); |
if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20)) |
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]); |
} |
} else { |
if (enable_requested) { |
I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
DRM_DEBUG_KMS("Requesting to disable the power well\n"); |
} |
} |
} |
/* |
* Starting with Haswell, we have a "Power Down Well" that can be turned off |
* when not needed anymore. We have 4 registers that can request the power well |
* to be enabled, and it will only be disabled if none of the registers is |
* requesting it to be enabled. |
*/ |
void intel_init_power_well(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (!IS_HASWELL(dev)) |
return; |
/* For now, we need the power well to be always enabled. */ |
intel_set_power_well(dev, true); |
/* We're taking over the BIOS, so clear any requests made by it since |
* the driver is in charge now. */ |
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE) |
I915_WRITE(HSW_PWR_WELL_BIOS, 0); |
mutex_unlock(&dev->struct_mutex); |
} |
/* Set up chip specific power management-related functions */ |
/drivers/video/drm/i915/intel_ringbuffer.c |
---|
320,7 → 320,6 |
* TLB invalidate requires a post-sync write. |
*/ |
flags |= PIPE_CONTROL_QW_WRITE; |
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; |
/* Workaround: we must issue a pipe_control with CS-stall bit |
* set before a pipe_control command that has the state cache |
334,7 → 333,7 |
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); |
intel_ring_emit(ring, flags); |
intel_ring_emit(ring, scratch_addr); |
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); |
intel_ring_emit(ring, 0); |
intel_ring_advance(ring); |
466,9 → 465,6 |
if (pc->cpu_page == NULL) |
goto err_unpin; |
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", |
ring->name, pc->gtt_offset); |
pc->obj = obj; |
ring->private = pc; |
return 0; |
560,8 → 556,6 |
static void render_ring_cleanup(struct intel_ring_buffer *ring) |
{ |
struct drm_device *dev = ring->dev; |
if (!ring->private) |
return; |
611,13 → 605,6 |
return 0; |
} |
static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, |
u32 seqno) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
return dev_priv->last_seqno < seqno; |
} |
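/* Editor's note: if the seqno we would wait on is greater than the last |
 * seqno handed out, it predates a wrap of the 32-bit counter and the |
 * semaphore wait below would never complete; the caller instead pads with |
 * four MI_NOOPs, keeping the emitted dword count constant either way. |
 */ |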
/** |
* intel_ring_sync - sync the waiter to the signaller on seqno |
* |
648,20 → 635,11 |
if (ret) |
return ret; |
/* If seqno wrap happened, omit the wait with no-ops */ |
if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { |
intel_ring_emit(waiter, |
dw1 | |
signaller->semaphore_register[waiter->id]); |
dw1 | signaller->semaphore_register[waiter->id]); |
intel_ring_emit(waiter, seqno); |
intel_ring_emit(waiter, 0); |
intel_ring_emit(waiter, MI_NOOP); |
} else { |
intel_ring_emit(waiter, MI_NOOP); |
intel_ring_emit(waiter, MI_NOOP); |
intel_ring_emit(waiter, MI_NOOP); |
intel_ring_emit(waiter, MI_NOOP); |
} |
intel_ring_advance(waiter); |
return 0; |
742,12 → 720,6 |
return intel_read_status_page(ring, I915_GEM_HWS_INDEX); |
} |
static void |
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) |
{ |
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno); |
} |
static u32 |
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) |
{ |
755,13 → 727,6 |
return pc->cpu_page[0]; |
} |
static void |
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) |
{ |
struct pipe_control *pc = ring->private; |
pc->cpu_page[0] = seqno; |
} |
static bool |
gen5_ring_get_irq(struct intel_ring_buffer *ring) |
{ |
1191,10 → 1156,6 |
return ret; |
} |
obj = NULL; |
if (!HAS_LLC(dev)) |
obj = i915_gem_object_create_stolen(dev, ring->size); |
if (obj == NULL) |
obj = i915_gem_alloc_object(dev, ring->size); |
if (obj == NULL) { |
DRM_ERROR("Failed to allocate ringbuffer\n"); |
1213,7 → 1174,7 |
goto err_unpin; |
ring->virtual_start = |
ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset, |
ioremap(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, |
ring->size); |
if (ring->virtual_start == NULL) { |
DRM_ERROR("Failed to map ringbuffer.\n"); |
1236,7 → 1197,7 |
return 0; |
err_unmap: |
iounmap(ring->virtual_start); |
FreeKernelSpace(ring->virtual_start); |
err_unpin: |
i915_gem_object_unpin(obj); |
err_unref: |
1264,7 → 1225,7 |
I915_WRITE_CTL(ring, 0); |
iounmap(ring->virtual_start); |
// drm_core_ioremapfree(&ring->map, ring->dev); |
i915_gem_object_unpin(ring->obj); |
drm_gem_object_unreference(&ring->obj->base); |
1373,8 → 1334,7 |
msleep(1); |
ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
dev_priv->mm.interruptible); |
ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); |
if (ret) |
return ret; |
} while (!time_after(GetTimerTicks(), end)); |
1436,35 → 1396,14 |
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); |
} |
static int __intel_ring_begin(struct intel_ring_buffer *ring, |
int bytes) |
{ |
int ret; |
if (unlikely(ring->tail + bytes > ring->effective_size)) { |
ret = intel_wrap_ring_buffer(ring); |
if (unlikely(ret)) |
return ret; |
} |
if (unlikely(ring->space < bytes)) { |
ret = ring_wait_for_space(ring, bytes); |
if (unlikely(ret)) |
return ret; |
} |
ring->space -= bytes; |
return 0; |
} |
int intel_ring_begin(struct intel_ring_buffer *ring, |
int num_dwords) |
{ |
drm_i915_private_t *dev_priv = ring->dev->dev_private; |
int n = 4*num_dwords; |
int ret; |
ret = i915_gem_check_wedge(&dev_priv->gpu_error, |
dev_priv->mm.interruptible); |
ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible); |
if (ret) |
return ret; |
1473,21 → 1412,20 |
if (ret) |
return ret; |
return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); |
if (unlikely(ring->tail + n > ring->effective_size)) { |
ret = intel_wrap_ring_buffer(ring); |
if (unlikely(ret)) |
return ret; |
} |
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) |
{ |
struct drm_i915_private *dev_priv = ring->dev->dev_private; |
BUG_ON(ring->outstanding_lazy_request); |
if (INTEL_INFO(ring->dev)->gen >= 6) { |
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); |
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0); |
if (unlikely(ring->space < n)) { |
ret = ring_wait_for_space(ring, n); |
if (unlikely(ret)) |
return ret; |
} |
ring->set_seqno(ring, seqno); |
ring->space -= n; |
return 0; |
} |
void intel_ring_advance(struct intel_ring_buffer *ring) |
1495,7 → 1433,7 |
struct drm_i915_private *dev_priv = ring->dev->dev_private; |
ring->tail &= ring->size - 1; |
if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) |
if (dev_priv->stop_rings & intel_ring_flag(ring)) |
return; |
ring->write_tail(ring, ring->tail); |
} |
1652,7 → 1590,6 |
ring->irq_put = gen6_ring_put_irq; |
ring->irq_enable_mask = GT_USER_INTERRUPT; |
ring->get_seqno = gen6_ring_get_seqno; |
ring->set_seqno = ring_set_seqno; |
ring->sync_to = gen6_ring_sync; |
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID; |
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV; |
1663,7 → 1600,6 |
ring->add_request = pc_render_add_request; |
ring->flush = gen4_render_ring_flush; |
ring->get_seqno = pc_render_get_seqno; |
ring->set_seqno = pc_render_set_seqno; |
ring->irq_get = gen5_ring_get_irq; |
ring->irq_put = gen5_ring_put_irq; |
ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY; |
1674,7 → 1610,6 |
else |
ring->flush = gen4_render_ring_flush; |
ring->get_seqno = ring_get_seqno; |
ring->set_seqno = ring_set_seqno; |
if (IS_GEN2(dev)) { |
ring->irq_get = i8xx_ring_get_irq; |
ring->irq_put = i8xx_ring_put_irq; |
1747,7 → 1682,6 |
else |
ring->flush = gen4_render_ring_flush; |
ring->get_seqno = ring_get_seqno; |
ring->set_seqno = ring_set_seqno; |
if (IS_GEN2(dev)) { |
ring->irq_get = i8xx_ring_get_irq; |
ring->irq_put = i8xx_ring_put_irq; |
1809,7 → 1743,6 |
ring->flush = gen6_ring_flush; |
ring->add_request = gen6_add_request; |
ring->get_seqno = gen6_ring_get_seqno; |
ring->set_seqno = ring_set_seqno; |
ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT; |
ring->irq_get = gen6_ring_get_irq; |
ring->irq_put = gen6_ring_put_irq; |
1825,7 → 1758,6 |
ring->flush = bsd_ring_flush; |
ring->add_request = i9xx_add_request; |
ring->get_seqno = ring_get_seqno; |
ring->set_seqno = ring_set_seqno; |
if (IS_GEN5(dev)) { |
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT; |
ring->irq_get = gen5_ring_get_irq; |
1855,7 → 1787,6 |
ring->flush = blt_ring_flush; |
ring->add_request = gen6_add_request; |
ring->get_seqno = gen6_ring_get_seqno; |
ring->set_seqno = ring_set_seqno; |
ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT; |
ring->irq_get = gen6_ring_get_irq; |
ring->irq_put = gen6_ring_put_irq; |
/drivers/video/drm/i915/intel_ringbuffer.h |
---|
90,8 → 90,6 |
*/ |
u32 (*get_seqno)(struct intel_ring_buffer *ring, |
bool lazy_coherency); |
void (*set_seqno)(struct intel_ring_buffer *ring, |
u32 seqno); |
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, |
u32 offset, u32 length, |
unsigned flags); |
180,13 → 178,6 |
return ring->status_page.page_addr[reg]; |
} |
static inline void |
intel_write_status_page(struct intel_ring_buffer *ring, |
int reg, u32 value) |
{ |
ring->status_page.page_addr[reg] = value; |
} |
/** |
* Reads a dword out of the status page, which is written to from the command |
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or |
217,7 → 208,7 |
} |
void intel_ring_advance(struct intel_ring_buffer *ring); |
int __must_check intel_ring_idle(struct intel_ring_buffer *ring); |
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno); |
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring); |
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring); |
/drivers/video/drm/i915/intel_sdvo.c |
---|
112,7 → 112,6 |
* It is only valid when using TMDS encoding and 8 bit per color mode. |
*/ |
uint32_t color_range; |
bool color_range_auto; |
/** |
* This is set if we're going to treat the device as TV-out. |
135,7 → 134,6 |
bool is_hdmi; |
bool has_hdmi_monitor; |
bool has_hdmi_audio; |
bool rgb_quant_range_selectable; |
/** |
* This is set if we detect output of sdvo device as LVDS and |
957,8 → 955,7 |
&tx_rate, 1); |
} |
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo, |
const struct drm_display_mode *adjusted_mode) |
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo) |
{ |
struct dip_infoframe avi_if = { |
.type = DIP_TYPE_AVI, |
967,13 → 964,6 |
}; |
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)]; |
if (intel_sdvo->rgb_quant_range_selectable) { |
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE) |
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED; |
else |
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL; |
} |
intel_dip_infoframe_csum(&avi_if); |
/* sdvo spec says that the ecc is handled by the hw, and it looks like |
1083,18 → 1073,6 |
multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode); |
intel_mode_set_pixel_multiplier(adjusted_mode, multiplier); |
if (intel_sdvo->color_range_auto) { |
/* See CEA-861-E - 5.1 Default Encoding Parameters */ |
if (intel_sdvo->has_hdmi_monitor && |
drm_match_cea_mode(adjusted_mode) > 1) |
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; |
else |
intel_sdvo->color_range = 0; |
} |
if (intel_sdvo->color_range) |
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE; |
return true; |
} |
1152,7 → 1130,7 |
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI); |
intel_sdvo_set_colorimetry(intel_sdvo, |
SDVO_COLORIMETRY_RGB256); |
intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode); |
intel_sdvo_set_avi_infoframe(intel_sdvo); |
} else |
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI); |
1184,7 → 1162,7 |
/* The real mode polarity is set by the SDVO commands, using |
* struct intel_sdvo_dtd. */ |
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH; |
if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi) |
if (intel_sdvo->is_hdmi) |
sdvox |= intel_sdvo->color_range; |
if (INTEL_INFO(dev)->gen < 5) |
sdvox |= SDVO_BORDER_ENABLE; |
1544,8 → 1522,6 |
if (intel_sdvo->is_hdmi) { |
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); |
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); |
intel_sdvo->rgb_quant_range_selectable = |
drm_rgb_quant_range_selectable(edid); |
} |
} else |
status = connector_status_disconnected; |
1597,7 → 1573,6 |
intel_sdvo->has_hdmi_monitor = false; |
intel_sdvo->has_hdmi_audio = false; |
intel_sdvo->rgb_quant_range_selectable = false; |
if ((intel_sdvo_connector->output_flag & response) == 0) |
ret = connector_status_disconnected; |
1909,6 → 1884,7 |
if (ret) |
return ret; |
#if 0 |
if (property == dev_priv->force_audio_property) { |
int i = val; |
bool has_audio; |
1931,23 → 1907,13 |
} |
if (property == dev_priv->broadcast_rgb_property) { |
switch (val) { |
case INTEL_BROADCAST_RGB_AUTO: |
intel_sdvo->color_range_auto = true; |
break; |
case INTEL_BROADCAST_RGB_FULL: |
intel_sdvo->color_range_auto = false; |
intel_sdvo->color_range = 0; |
break; |
case INTEL_BROADCAST_RGB_LIMITED: |
intel_sdvo->color_range_auto = false; |
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235; |
break; |
default: |
return -EINVAL; |
} |
if (val == !!intel_sdvo->color_range) |
return 0; |
intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0; |
goto done; |
} |
#endif |
#define CHECK_PROPERTY(name, NAME) \ |
if (intel_sdvo_connector->name == property) { \ |
2042,8 → 2008,11 |
done: |
if (intel_sdvo->base.base.crtc) |
intel_crtc_restore_mode(intel_sdvo->base.base.crtc); |
if (intel_sdvo->base.base.crtc) { |
struct drm_crtc *crtc = intel_sdvo->base.base.crtc; |
intel_set_mode(crtc, &crtc->mode, |
crtc->x, crtc->y, crtc->fb); |
} |
return 0; |
#undef CHECK_PROPERTY |
2052,6 → 2021,7 |
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = { |
.mode_fixup = intel_sdvo_mode_fixup, |
.mode_set = intel_sdvo_mode_set, |
.disable = intel_encoder_noop, |
}; |
static const struct drm_connector_funcs intel_sdvo_connector_funcs = { |
2241,17 → 2211,14 |
} |
static void |
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo, |
struct intel_sdvo_connector *connector) |
intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector) |
{ |
struct drm_device *dev = connector->base.base.dev; |
intel_attach_force_audio_property(&connector->base.base); |
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) { |
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) |
intel_attach_broadcast_rgb_property(&connector->base.base); |
intel_sdvo->color_range_auto = true; |
} |
} |
static bool |
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) |
2298,7 → 2265,7 |
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
if (intel_sdvo->is_hdmi) |
intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector); |
intel_sdvo_add_hdmi_properties(intel_sdvo_connector); |
return true; |
} |
/drivers/video/drm/i915/intel_sprite.c |
---|
50,7 → 50,6 |
u32 sprctl, sprscale = 0; |
unsigned long sprsurf_offset, linear_offset; |
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); |
bool scaling_was_enabled = dev_priv->sprite_scaling_enabled; |
sprctl = I915_READ(SPRCTL(pipe)); |
90,9 → 89,6 |
sprctl |= SPRITE_TRICKLE_FEED_DISABLE; |
sprctl |= SPRITE_ENABLE; |
if (IS_HASWELL(dev)) |
sprctl |= SPRITE_PIPE_CSC_ENABLE; |
/* Sizes are 0 based */ |
src_w--; |
src_h--; |
107,15 → 103,19 |
* when scaling is disabled. |
*/ |
if (crtc_w != src_w || crtc_h != src_h) { |
dev_priv->sprite_scaling_enabled |= 1 << pipe; |
if (!scaling_was_enabled) { |
if (!dev_priv->sprite_scaling_enabled) { |
dev_priv->sprite_scaling_enabled = true; |
intel_update_watermarks(dev); |
intel_wait_for_vblank(dev, pipe); |
} |
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; |
} else |
dev_priv->sprite_scaling_enabled &= ~(1 << pipe); |
} else { |
if (dev_priv->sprite_scaling_enabled) { |
dev_priv->sprite_scaling_enabled = false; |
/* potentially re-enable LP watermarks */ |
intel_update_watermarks(dev); |
} |
} |
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); |
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x); |
122,7 → 122,7 |
linear_offset = y * fb->pitches[0] + x * pixel_size; |
sprsurf_offset = |
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, |
intel_gen4_compute_offset_xtiled(&x, &y, |
pixel_size, fb->pitches[0]); |
linear_offset -= sprsurf_offset; |
141,10 → 141,6 |
I915_WRITE(SPRCTL(pipe), sprctl); |
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset); |
POSTING_READ(SPRSURF(pipe)); |
/* potentially re-enable LP watermarks */ |
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) |
intel_update_watermarks(dev); |
} |
static void |
154,7 → 150,6 |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct intel_plane *intel_plane = to_intel_plane(plane); |
int pipe = intel_plane->pipe; |
bool scaling_was_enabled = dev_priv->sprite_scaling_enabled; |
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE); |
/* Can't leave the scaler enabled... */ |
164,10 → 159,7 |
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0); |
POSTING_READ(SPRSURF(pipe)); |
dev_priv->sprite_scaling_enabled &= ~(1 << pipe); |
/* potentially re-enable LP watermarks */ |
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled) |
dev_priv->sprite_scaling_enabled = false; |
intel_update_watermarks(dev); |
} |
295,7 → 287,7 |
linear_offset = y * fb->pitches[0] + x * pixel_size; |
dvssurf_offset = |
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode, |
intel_gen4_compute_offset_xtiled(&x, &y, |
pixel_size, fb->pitches[0]); |
linear_offset -= dvssurf_offset; |
599,7 → 591,7 |
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE); |
if (!obj) { |
612,7 → 604,7 |
ret = intel_plane->update_colorkey(plane, set); |
out_unlock: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
626,7 → 618,7 |
int ret = 0; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE); |
if (!obj) { |
639,7 → 631,7 |
intel_plane->get_colorkey(plane, get); |
out_unlock: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
/drivers/video/drm/i915/i915_gem_stolen.c |
---|
42,73 → 42,85 |
* for is a boon. |
*/ |
static unsigned long i915_stolen_to_physical(struct drm_device *dev) |
#define PTE_ADDRESS_MASK 0xfffff000 |
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */ |
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1) |
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */ |
#define PTE_MAPPING_TYPE_CACHED (3 << 1) |
#define PTE_MAPPING_TYPE_MASK (3 << 1) |
#define PTE_VALID (1 << 0) |
/** |
* i915_stolen_to_phys - take an offset into stolen memory and turn it into |
* a physical one |
* @dev: drm device |
* @offset: address to translate |
* |
* Some chip functions require allocations from stolen space and need the |
* physical address of the memory in question. |
*/ |
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct pci_dev *pdev = dev_priv->bridge_dev; |
u32 base; |
#if 0 |
/* On the machines I have tested the Graphics Base of Stolen Memory |
* is unreliable, so on those compute the base by subtracting the |
* stolen memory from the Top of Low Usable DRAM which is where the |
* BIOS places the graphics stolen memory. |
* |
* On gen2, the layout is slightly different with the Graphics Segment |
* immediately following Top of Memory (or Top of Usable DRAM). Note |
* it appears that TOUD is only reported by 865g, so we just use the |
* top of memory as determined by the e820 probe. |
* |
* XXX gen2 requires an unavailable symbol and 945gm fails with |
* its value of TOLUD. |
* is unreliable, so compute the base by subtracting the stolen memory |
* from the Top of Low Usable DRAM which is where the BIOS places |
* the graphics stolen memory. |
*/ |
base = 0; |
if (INTEL_INFO(dev)->gen >= 6) { |
/* Read Base Data of Stolen Memory Register (BDSM) directly. |
* Note that there is also a MCHBAR mirror at 0x1080c0, or |
* we could use device 2:0x5c instead. |
*/ |
pci_read_config_dword(pdev, 0xB0, &base); |
base &= ~4095; /* lower bits used for locking register */ |
} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { |
/* Read Graphics Base of Stolen Memory directly */ |
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { |
/* top 32bits are reserved = 0 */ |
pci_read_config_dword(pdev, 0xA4, &base); |
#if 0 |
} else if (IS_GEN3(dev)) { |
} else { |
/* XXX presume 8xx is the same as i915 */ |
pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base); |
} |
#else |
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) { |
u16 val; |
pci_read_config_word(pdev, 0xb0, &val); |
base = val >> 4 << 20; |
} else { |
u8 val; |
/* Stolen is immediately below Top of Low Usable DRAM */ |
pci_read_config_byte(pdev, 0x9c, &val); |
base = val >> 3 << 27; |
} |
base -= dev_priv->mm.gtt->stolen_size; |
} else { |
/* Stolen is immediately above Top of Memory */ |
base = max_low_pfn_mapped << PAGE_SHIFT; |
#endif |
return base + offset; |
} |
return base; |
static void i915_warn_stolen(struct drm_device *dev) |
{ |
DRM_INFO("not enough stolen space for compressed buffer, disabling\n"); |
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n"); |
} |
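/* Editor's sketch: a worked example of the gen6+ BDSM decode above. The |
 * register keeps locking bits in its low 12 bits, so a raw config read of, |
 * say, 0x7b000001 masks down to a stolen base of 0x7b000000. The value is |
 * illustrative; only the masking is taken from the code. |
 */ |
static u32 bdsm_to_stolen_base(u32 bdsm) |
{ |
	return bdsm & ~4095;	/* strip the lock bits, keep the 4K-aligned base */ |
} |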
static int i915_setup_compression(struct drm_device *dev, int size) |
static void i915_setup_compression(struct drm_device *dev, int size) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); |
unsigned long cfb_base; |
unsigned long ll_base = 0; |
/* Try to over-allocate to reduce reallocations and fragmentation */ |
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, |
size <<= 1, 4096, 0); |
if (!compressed_fb) |
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, |
size >>= 1, 4096, 0); |
/* Just in case the BIOS is doing something questionable. */ |
intel_disable_fbc(dev); |
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); |
if (compressed_fb) |
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); |
if (!compressed_fb) |
goto err; |
if (HAS_PCH_SPLIT(dev)) |
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); |
else if (IS_GM45(dev)) { |
I915_WRITE(DPFC_CB_BASE, compressed_fb->start); |
} else { |
cfb_base = i915_stolen_to_phys(dev, compressed_fb->start); |
if (!cfb_base) |
goto err_fb; |
if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) { |
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen, |
4096, 4096, 0); |
if (compressed_llb) |
117,206 → 129,73 |
if (!compressed_llb) |
goto err_fb; |
dev_priv->compressed_llb = compressed_llb; |
I915_WRITE(FBC_CFB_BASE, |
dev_priv->mm.stolen_base + compressed_fb->start); |
I915_WRITE(FBC_LL_BASE, |
dev_priv->mm.stolen_base + compressed_llb->start); |
ll_base = i915_stolen_to_phys(dev, compressed_llb->start); |
if (!ll_base) |
goto err_llb; |
} |
dev_priv->compressed_fb = compressed_fb; |
dev_priv->cfb_size = size; |
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", |
size); |
dev_priv->compressed_fb = compressed_fb; |
if (HAS_PCH_SPLIT(dev)) |
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); |
else if (IS_GM45(dev)) { |
I915_WRITE(DPFC_CB_BASE, compressed_fb->start); |
} else { |
I915_WRITE(FBC_CFB_BASE, cfb_base); |
I915_WRITE(FBC_LL_BASE, ll_base); |
dev_priv->compressed_llb = compressed_llb; |
} |
return 0; |
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", |
cfb_base, ll_base, size >> 20); |
return; |
err_llb: |
drm_mm_put_block(compressed_llb); |
err_fb: |
drm_mm_put_block(compressed_fb); |
err: |
return -ENOSPC; |
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
i915_warn_stolen(dev); |
} |
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size) |
static void i915_cleanup_compression(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (dev_priv->mm.stolen_base == 0) |
return -ENODEV; |
if (size < dev_priv->cfb_size) |
return 0; |
/* Release any current block */ |
i915_gem_stolen_cleanup_compression(dev); |
return i915_setup_compression(dev, size); |
} |
void i915_gem_stolen_cleanup_compression(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
if (dev_priv->cfb_size == 0) |
return; |
if (dev_priv->compressed_fb) |
drm_mm_put_block(dev_priv->compressed_fb); |
if (dev_priv->compressed_llb) |
drm_mm_put_block(dev_priv->compressed_llb); |
dev_priv->cfb_size = 0; |
} |
void i915_gem_cleanup_stolen(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
i915_gem_stolen_cleanup_compression(dev); |
drm_mm_takedown(&dev_priv->mm.stolen); |
if (I915_HAS_FBC(dev) && i915_powersave) |
i915_cleanup_compression(dev); |
} |
int i915_gem_init_stolen(struct drm_device *dev) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size; |
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); |
if (dev_priv->mm.stolen_base == 0) |
return 0; |
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n", |
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base); |
/* Basic memrange allocator for stolen space */ |
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size); |
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size); |
return 0; |
} |
/* Try to set up FBC with a reasonable compressed buffer size */ |
if (I915_HAS_FBC(dev) && i915_powersave) { |
int cfb_size; |
static struct sg_table * |
i915_pages_create_for_stolen(struct drm_device *dev, |
u32 offset, u32 size) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct sg_table *st; |
struct scatterlist *sg; |
/* Leave 1M for line length buffer & misc. */ |
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size); |
BUG_ON(offset > dev_priv->gtt.stolen_size - size); |
/* We hide that we have no struct page backing our stolen object |
* by wrapping the contiguous physical allocation with a fake |
* dma mapping in a single scatterlist. |
*/ |
st = kmalloc(sizeof(*st), GFP_KERNEL); |
if (st == NULL) |
return NULL; |
if (sg_alloc_table(st, 1, GFP_KERNEL)) { |
kfree(st); |
return NULL; |
/* Try to get a 32M buffer... */ |
if (prealloc_size > (36*1024*1024)) |
cfb_size = 32*1024*1024; |
else /* fall back to 7/8 of the stolen space */ |
cfb_size = prealloc_size * 7 / 8; |
i915_setup_compression(dev, cfb_size); |
} |
sg = st->sgl; |
sg->offset = offset; |
sg->length = size; |
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset; |
sg_dma_len(sg) = size; |
return st; |
return 0; |
} |
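/* Editor's note: the single-entry table built above is what lets stolen |
 * objects masquerade as dma-mapped ones: sg_dma_address() yields |
 * stolen_base + offset directly, so nothing downstream ever asks for the |
 * struct pages that stolen memory does not have. |
 */ |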
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj) |
{ |
BUG(); |
return -EINVAL; |
} |
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj) |
{ |
/* Should only be called during free */ |
sg_free_table(obj->pages); |
kfree(obj->pages); |
} |
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { |
.get_pages = i915_gem_object_get_pages_stolen, |
.put_pages = i915_gem_object_put_pages_stolen, |
}; |
static struct drm_i915_gem_object * |
_i915_gem_object_create_stolen(struct drm_device *dev, |
struct drm_mm_node *stolen) |
{ |
struct drm_i915_gem_object *obj; |
obj = i915_gem_object_alloc(dev); |
if (obj == NULL) |
return NULL; |
if (drm_gem_private_object_init(dev, &obj->base, stolen->size)) |
goto cleanup; |
i915_gem_object_init(obj, &i915_gem_object_stolen_ops); |
obj->pages = i915_pages_create_for_stolen(dev, |
stolen->start, stolen->size); |
if (obj->pages == NULL) |
goto cleanup; |
obj->has_dma_mapping = true; |
obj->pages_pin_count = 1; |
obj->stolen = stolen; |
obj->base.write_domain = I915_GEM_DOMAIN_GTT; |
obj->base.read_domains = I915_GEM_DOMAIN_GTT; |
obj->cache_level = I915_CACHE_NONE; |
return obj; |
cleanup: |
i915_gem_object_free(obj); |
return NULL; |
} |
struct drm_i915_gem_object * |
i915_gem_object_create_stolen(struct drm_device *dev, u32 size) |
{ |
struct drm_i915_private *dev_priv = dev->dev_private; |
struct drm_i915_gem_object *obj; |
struct drm_mm_node *stolen; |
if (dev_priv->mm.stolen_base == 0) |
return NULL; |
DRM_DEBUG_KMS("creating stolen object: size=%x\n", size); |
if (size == 0) |
return NULL; |
stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0); |
if (stolen) |
stolen = drm_mm_get_block(stolen, size, 4096); |
if (stolen == NULL) |
return NULL; |
obj = _i915_gem_object_create_stolen(dev, stolen); |
if (obj) |
return obj; |
drm_mm_put_block(stolen); |
return NULL; |
} |
void |
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) |
{ |
if (obj->stolen) { |
drm_mm_put_block(obj->stolen); |
obj->stolen = NULL; |
} |
} |
/drivers/video/drm/i915/i915_gem_tiling.c |
---|
291,7 → 291,18 |
return false; |
} |
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode); |
/* |
* Previous chips need to be aligned to the size of the smallest |
* fence register that can contain the object. |
*/ |
if (INTEL_INFO(obj->base.dev)->gen == 3) |
size = 1024*1024; |
else |
size = 512*1024; |
while (size < obj->base.size) |
size <<= 1; |
if (obj->gtt_space->size != size) |
return false; |
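/* Editor's sketch: the sizing rule above as a standalone helper, with a |
 * worked case. On gen3 the minimum fence is 1MB, so a 1.5MB object doubles |
 * once to a 2MB region; gen2 starts from 512KB. The helper name is |
 * illustrative only. |
 */ |
static u32 fence_region_size(struct drm_device *dev, u32 obj_size) |
{ |
	u32 size = INTEL_INFO(dev)->gen == 3 ? 1024*1024 : 512*1024; |
	while (size < obj_size) |
		size <<= 1; |
	return size; |
} |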
376,15 → 387,15 |
obj->map_and_fenceable = |
obj->gtt_space == NULL || |
(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end && |
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
i915_gem_object_fence_ok(obj, args->tiling_mode)); |
/* Rebind if we need a change of alignment */ |
if (!obj->map_and_fenceable) { |
u32 unfenced_alignment = |
i915_gem_get_gtt_alignment(dev, obj->base.size, |
args->tiling_mode, |
false); |
i915_gem_get_unfenced_gtt_alignment(dev, |
obj->base.size, |
args->tiling_mode); |
if (obj->gtt_offset & (unfenced_alignment - 1)) |
ret = i915_gem_object_unbind(obj); |
} |
404,18 → 415,6 |
/* we have to maintain this existing ABI... */ |
args->stride = obj->stride; |
args->tiling_mode = obj->tiling_mode; |
/* Try to preallocate memory required to save swizzling on put-pages */ |
if (i915_gem_object_needs_bit17_swizzle(obj)) { |
if (obj->bit_17 == NULL) { |
obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) * |
sizeof(long), GFP_KERNEL); |
} |
} else { |
kfree(obj->bit_17); |
obj->bit_17 = NULL; |
} |
drm_gem_object_unreference(&obj->base); |
mutex_unlock(&dev->struct_mutex); |
/drivers/video/drm/i915/intel_dvo.c |
---|
345,6 → 345,7 |
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = { |
.mode_fixup = intel_dvo_mode_fixup, |
.mode_set = intel_dvo_mode_set, |
.disable = intel_encoder_noop, |
}; |
static const struct drm_connector_funcs intel_dvo_connector_funcs = { |
/drivers/video/drm/i915/sna/gen6_render.c |
---|
0,0 → 1,2163 |
/* |
* Copyright © 2006,2008,2011 Intel Corporation |
* Copyright © 2007 Red Hat, Inc. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
* SOFTWARE. |
* |
* Authors: |
* Wang Zhenyu <zhenyu.z.wang@sna.com> |
* Eric Anholt <eric@anholt.net> |
* Carl Worth <cworth@redhat.com> |
* Keith Packard <keithp@keithp.com> |
* Chris Wilson <chris@chris-wilson.co.uk> |
* |
*/ |
#include <drmP.h> |
#include <drm.h> |
#include "i915_drm.h" |
#include "i915_drv.h" |
#include "intel_drv.h" |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <errno-base.h> |
#include <memory.h> |
#include <syscall.h> |
#include "../bitmap.h" |
#include "sna.h" |
//#include "sna_reg.h" |
#include "sna_render.h" |
//#include "sna_render_inline.h" |
//#include "sna_video.h" |
#include "gen6_render.h" |
#define NO_COMPOSITE 0 |
#define NO_COMPOSITE_SPANS 0 |
#define NO_COPY 0 |
#define NO_COPY_BOXES 0 |
#define NO_FILL 0 |
#define NO_FILL_BOXES 0 |
#define NO_CLEAR 0 |
#define NO_RING_SWITCH 1 |
#define GEN6_MAX_SIZE 8192 |
static const uint32_t ps_kernel_nomask_affine[][4] = { |
#include "exa_wm_src_affine.g6b" |
#include "exa_wm_src_sample_argb.g6b" |
#include "exa_wm_write.g6b" |
}; |
static const uint32_t ps_kernel_masknoca_affine[][4] = { |
#include "exa_wm_src_affine.g6b" |
#include "exa_wm_src_sample_argb.g6b" |
#include "exa_wm_mask_affine.g6b" |
#include "exa_wm_mask_sample_a.g6b" |
#include "exa_wm_noca.g6b" |
#include "exa_wm_write.g6b" |
}; |
#define KERNEL(kernel_enum, kernel, masked) \ |
[GEN6_WM_KERNEL_##kernel_enum] = {#kernel_enum, kernel, sizeof(kernel), masked} |
static const struct wm_kernel_info { |
const char *name; |
const void *data; |
unsigned int size; |
Bool has_mask; |
} wm_kernels[] = { |
// KERNEL(NOMASK, ps_kernel_nomask_affine, FALSE), |
// KERNEL(MASK, ps_kernel_masknoca_affine, TRUE), |
KERNEL(NOMASK, ps_kernel_masknoca_affine, TRUE), |
KERNEL(MASK, ps_kernel_masknoca_affine, TRUE), |
}; |
#undef KERNEL |
static const struct blendinfo { |
Bool src_alpha; |
uint32_t src_blend; |
uint32_t dst_blend; |
} gen6_blend_op[] = { |
/* Clear */ {0, GEN6_BLENDFACTOR_ZERO, GEN6_BLENDFACTOR_ZERO}, |
/* Src */ {0, GEN6_BLENDFACTOR_ONE, GEN6_BLENDFACTOR_ZERO}, |
/* Dst */ {0, GEN6_BLENDFACTOR_ZERO, GEN6_BLENDFACTOR_ONE}, |
/* Over */ {1, GEN6_BLENDFACTOR_ONE, GEN6_BLENDFACTOR_INV_SRC_ALPHA}, |
/* OverReverse */ {0, GEN6_BLENDFACTOR_INV_DST_ALPHA, GEN6_BLENDFACTOR_ONE}, |
/* In */ {0, GEN6_BLENDFACTOR_DST_ALPHA, GEN6_BLENDFACTOR_ZERO}, |
/* InReverse */ {1, GEN6_BLENDFACTOR_ZERO, GEN6_BLENDFACTOR_SRC_ALPHA}, |
/* Out */ {0, GEN6_BLENDFACTOR_INV_DST_ALPHA, GEN6_BLENDFACTOR_ZERO}, |
/* OutReverse */ {1, GEN6_BLENDFACTOR_ZERO, GEN6_BLENDFACTOR_INV_SRC_ALPHA}, |
/* Atop */ {1, GEN6_BLENDFACTOR_DST_ALPHA, GEN6_BLENDFACTOR_INV_SRC_ALPHA}, |
/* AtopReverse */ {1, GEN6_BLENDFACTOR_INV_DST_ALPHA, GEN6_BLENDFACTOR_SRC_ALPHA}, |
/* Xor */ {1, GEN6_BLENDFACTOR_INV_DST_ALPHA, GEN6_BLENDFACTOR_INV_SRC_ALPHA}, |
/* Add */ {0, GEN6_BLENDFACTOR_ONE, GEN6_BLENDFACTOR_ONE}, |
}; |
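/* Editor's note: each row above is the Porter-Duff (src_factor, dst_factor) |
 * pair for dst' = src_factor*src + dst_factor*dst. "Over" is therefore |
 * {ONE, INV_SRC_ALPHA}: dst' = src + (1 - alpha_src)*dst, while "In" keeps |
 * only the source scaled by destination alpha, {DST_ALPHA, ZERO}. |
 */ |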
/** |
* Highest-valued BLENDFACTOR used in gen6_blend_op. |
* |
* This leaves out GEN6_BLENDFACTOR_INV_DST_COLOR, |
* GEN6_BLENDFACTOR_INV_CONST_{COLOR,ALPHA}, |
* GEN6_BLENDFACTOR_INV_SRC1_{COLOR,ALPHA} |
*/ |
#define GEN6_BLENDFACTOR_COUNT (GEN6_BLENDFACTOR_INV_DST_ALPHA + 1) |
/* FIXME: surface format defined in gen6_defines.h, shared Sampling engine |
* 1.7.2 |
static const struct formatinfo { |
CARD32 pict_fmt; |
uint32_t card_fmt; |
} gen6_tex_formats[] = { |
{PICT_a8, GEN6_SURFACEFORMAT_A8_UNORM}, |
{PICT_a8r8g8b8, GEN6_SURFACEFORMAT_B8G8R8A8_UNORM}, |
{PICT_x8r8g8b8, GEN6_SURFACEFORMAT_B8G8R8X8_UNORM}, |
{PICT_a8b8g8r8, GEN6_SURFACEFORMAT_R8G8B8A8_UNORM}, |
{PICT_x8b8g8r8, GEN6_SURFACEFORMAT_R8G8B8X8_UNORM}, |
{PICT_r8g8b8, GEN6_SURFACEFORMAT_R8G8B8_UNORM}, |
{PICT_r5g6b5, GEN6_SURFACEFORMAT_B5G6R5_UNORM}, |
{PICT_a1r5g5b5, GEN6_SURFACEFORMAT_B5G5R5A1_UNORM}, |
{PICT_a2r10g10b10, GEN6_SURFACEFORMAT_B10G10R10A2_UNORM}, |
{PICT_x2r10g10b10, GEN6_SURFACEFORMAT_B10G10R10X2_UNORM}, |
{PICT_a2b10g10r10, GEN6_SURFACEFORMAT_R10G10B10A2_UNORM}, |
{PICT_x2r10g10b10, GEN6_SURFACEFORMAT_B10G10R10X2_UNORM}, |
{PICT_a4r4g4b4, GEN6_SURFACEFORMAT_B4G4R4A4_UNORM}, |
}; |
*/ |
#define GEN6_BLEND_STATE_PADDED_SIZE ALIGN(sizeof(struct gen6_blend_state), 64) |
#define BLEND_OFFSET(s, d) \ |
(((s) * GEN6_BLENDFACTOR_COUNT + (d)) * GEN6_BLEND_STATE_PADDED_SIZE) |
#define SAMPLER_OFFSET(sf, se, mf, me) \ |
(((((sf) * EXTEND_COUNT + (se)) * FILTER_COUNT + (mf)) * EXTEND_COUNT + (me)) * 2 * sizeof(struct gen6_sampler_state)) |
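/* Editor's note: both macros above flatten multi-dimensional state tables |
 * into byte offsets in the static stream. BLEND_OFFSET treats the blend |
 * states as a row-major [src][dst] array of 64-byte-padded cells, so |
 * gen6_emit_cc() can point the CC state at any (src, dst) factor pair; |
 * SAMPLER_OFFSET does the same over (filter, extend) pairs for source and |
 * mask, two gen6_sampler_state entries per combination. |
 */ |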
#define OUT_BATCH(v) batch_emit(sna, v) |
#define OUT_VERTEX(x,y) vertex_emit_2s(sna, x,y) |
#define OUT_VERTEX_F(v) vertex_emit(sna, v) |
static inline bool too_large(int width, int height) |
{ |
return width > GEN6_MAX_SIZE || height > GEN6_MAX_SIZE; |
} |
static uint32_t gen6_get_blend(int op, |
bool has_component_alpha, |
uint32_t dst_format) |
{ |
uint32_t src, dst; |
// src = GEN6_BLENDFACTOR_ONE; //gen6_blend_op[op].src_blend; |
// dst = GEN6_BLENDFACTOR_ZERO; //gen6_blend_op[op].dst_blend; |
src = GEN6_BLENDFACTOR_ONE; //gen6_blend_op[op].src_blend; |
dst = GEN6_BLENDFACTOR_INV_SRC_ALPHA; //gen6_blend_op[op].dst_blend; |
#if 0 |
/* If there's no dst alpha channel, adjust the blend op so that |
* we'll treat it always as 1. |
*/ |
if (PICT_FORMAT_A(dst_format) == 0) { |
if (src == GEN6_BLENDFACTOR_DST_ALPHA) |
src = GEN6_BLENDFACTOR_ONE; |
else if (src == GEN6_BLENDFACTOR_INV_DST_ALPHA) |
src = GEN6_BLENDFACTOR_ZERO; |
} |
/* If the source alpha is being used, then we should only be in a |
* case where the source blend factor is 0, and the source blend |
* value is the mask channels multiplied by the source picture's alpha. |
*/ |
if (has_component_alpha && gen6_blend_op[op].src_alpha) { |
if (dst == GEN6_BLENDFACTOR_SRC_ALPHA) |
dst = GEN6_BLENDFACTOR_SRC_COLOR; |
else if (dst == GEN6_BLENDFACTOR_INV_SRC_ALPHA) |
dst = GEN6_BLENDFACTOR_INV_SRC_COLOR; |
} |
DBG(("blend op=%d, dst=%x [A=%d] => src=%d, dst=%d => offset=%x\n", |
op, dst_format, PICT_FORMAT_A(dst_format), |
src, dst, (int)BLEND_OFFSET(src, dst))); |
#endif |
return BLEND_OFFSET(src, dst); |
} |
static uint32_t gen6_get_dest_format(CARD32 format) |
{ |
return GEN6_SURFACEFORMAT_B8G8R8A8_UNORM; |
/* |
switch (format) { |
default: |
assert(0); |
case PICT_a8r8g8b8: |
case PICT_x8r8g8b8: |
return GEN6_SURFACEFORMAT_B8G8R8A8_UNORM; |
case PICT_a8b8g8r8: |
case PICT_x8b8g8r8: |
return GEN6_SURFACEFORMAT_R8G8B8A8_UNORM; |
case PICT_a2r10g10b10: |
case PICT_x2r10g10b10: |
return GEN6_SURFACEFORMAT_B10G10R10A2_UNORM; |
case PICT_r5g6b5: |
return GEN6_SURFACEFORMAT_B5G6R5_UNORM; |
case PICT_x1r5g5b5: |
case PICT_a1r5g5b5: |
return GEN6_SURFACEFORMAT_B5G5R5A1_UNORM; |
case PICT_a8: |
return GEN6_SURFACEFORMAT_A8_UNORM; |
case PICT_a4r4g4b4: |
case PICT_x4r4g4b4: |
return GEN6_SURFACEFORMAT_B4G4R4A4_UNORM; |
} |
*/ |
} |
#if 0 |
static Bool gen6_check_dst_format(PictFormat format) |
{ |
switch (format) { |
case PICT_a8r8g8b8: |
case PICT_x8r8g8b8: |
case PICT_a8b8g8r8: |
case PICT_x8b8g8r8: |
case PICT_a2r10g10b10: |
case PICT_x2r10g10b10: |
case PICT_r5g6b5: |
case PICT_x1r5g5b5: |
case PICT_a1r5g5b5: |
case PICT_a8: |
case PICT_a4r4g4b4: |
case PICT_x4r4g4b4: |
return TRUE; |
} |
return FALSE; |
} |
static bool gen6_check_format(uint32_t format) |
{ |
switch (format) { |
case PICT_a8r8g8b8: |
case PICT_x8r8g8b8: |
case PICT_a8b8g8r8: |
case PICT_x8b8g8r8: |
case PICT_a2r10g10b10: |
case PICT_x2r10g10b10: |
case PICT_r8g8b8: |
case PICT_r5g6b5: |
case PICT_a1r5g5b5: |
case PICT_a8: |
case PICT_a4r4g4b4: |
case PICT_x4r4g4b4: |
return true; |
default: |
DBG(("%s: unhandled format: %x\n", __FUNCTION__, format)); |
return false; |
} |
} |
static uint32_t gen6_filter(uint32_t filter) |
{ |
switch (filter) { |
default: |
assert(0); |
case PictFilterNearest: |
return SAMPLER_FILTER_NEAREST; |
case PictFilterBilinear: |
return SAMPLER_FILTER_BILINEAR; |
} |
} |
static uint32_t gen6_check_filter(PicturePtr picture) |
{ |
switch (picture->filter) { |
case PictFilterNearest: |
case PictFilterBilinear: |
return TRUE; |
default: |
return FALSE; |
} |
} |
static uint32_t gen6_repeat(uint32_t repeat) |
{ |
switch (repeat) { |
default: |
assert(0); |
case RepeatNone: |
return SAMPLER_EXTEND_NONE; |
case RepeatNormal: |
return SAMPLER_EXTEND_REPEAT; |
case RepeatPad: |
return SAMPLER_EXTEND_PAD; |
case RepeatReflect: |
return SAMPLER_EXTEND_REFLECT; |
} |
} |
static bool gen6_check_repeat(PicturePtr picture) |
{ |
if (!picture->repeat) |
return TRUE; |
switch (picture->repeatType) { |
case RepeatNone: |
case RepeatNormal: |
case RepeatPad: |
case RepeatReflect: |
return TRUE; |
default: |
return FALSE; |
} |
} |
#endif |
static int |
gen6_choose_composite_kernel(int op, Bool has_mask, Bool is_ca, Bool is_affine) |
{ |
int base; |
if (has_mask) { |
/* |
if (is_ca) { |
if (gen6_blend_op[op].src_alpha) |
base = GEN6_WM_KERNEL_MASKCA_SRCALPHA; |
else |
base = GEN6_WM_KERNEL_MASKCA; |
} else |
base = GEN6_WM_KERNEL_MASK; |
*/ |
} else |
base = GEN6_WM_KERNEL_NOMASK; |
return base + !is_affine; |
} |
static void |
gen6_emit_urb(struct sna *sna) |
{ |
OUT_BATCH(GEN6_3DSTATE_URB | (3 - 2)); |
OUT_BATCH(((1 - 1) << GEN6_3DSTATE_URB_VS_SIZE_SHIFT) | |
(24 << GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT)); /* at least 24 on GEN6 */ |
OUT_BATCH((0 << GEN6_3DSTATE_URB_GS_SIZE_SHIFT) | |
(0 << GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT)); /* no GS thread */ |
} |
static void |
gen6_emit_state_base_address(struct sna *sna) |
{ |
OUT_BATCH(GEN6_STATE_BASE_ADDRESS | (10 - 2)); |
OUT_BATCH(0); /* general */ |
OUT_BATCH((sna->kgem.batch_obj->gtt_offset+ |
sna->kgem.batch_idx*4096)|BASE_ADDRESS_MODIFY); |
OUT_BATCH(sna->render_state.gen6.general_bo->gaddr|BASE_ADDRESS_MODIFY); |
OUT_BATCH(0); /* indirect */ |
OUT_BATCH(sna->render_state.gen6.general_bo->gaddr|BASE_ADDRESS_MODIFY); |
/* upper bounds, disable */ |
OUT_BATCH(0); |
OUT_BATCH(BASE_ADDRESS_MODIFY); |
OUT_BATCH(0); |
OUT_BATCH(BASE_ADDRESS_MODIFY); |
} |
static void |
gen6_emit_viewports(struct sna *sna) |
{ |
OUT_BATCH(GEN6_3DSTATE_VIEWPORT_STATE_POINTERS | |
GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC | |
(4 - 2)); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(sna->render_state.gen6.cc_vp); |
} |
static void |
gen6_emit_vs(struct sna *sna) |
{ |
/* disable VS constant buffer */ |
OUT_BATCH(GEN6_3DSTATE_CONSTANT_VS | (5 - 2)); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(GEN6_3DSTATE_VS | (6 - 2)); |
OUT_BATCH(0); /* no VS kernel */ |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); /* pass-through */ |
} |
static void |
gen6_emit_gs(struct sna *sna) |
{ |
/* disable GS constant buffer */ |
OUT_BATCH(GEN6_3DSTATE_CONSTANT_GS | (5 - 2)); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(GEN6_3DSTATE_GS | (7 - 2)); |
OUT_BATCH(0); /* no GS kernel */ |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); /* pass-through */ |
} |
static void |
gen6_emit_clip(struct sna *sna) |
{ |
OUT_BATCH(GEN6_3DSTATE_CLIP | (4 - 2)); |
OUT_BATCH(0); |
OUT_BATCH(0); /* pass-through */ |
OUT_BATCH(0); |
} |
static void |
gen6_emit_wm_constants(struct sna *sna) |
{ |
/* disable WM constant buffer */ |
OUT_BATCH(GEN6_3DSTATE_CONSTANT_PS | (5 - 2)); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
} |
static void |
gen6_emit_null_depth_buffer(struct sna *sna) |
{ |
OUT_BATCH(GEN6_3DSTATE_DEPTH_BUFFER | (7 - 2)); |
OUT_BATCH(GEN6_SURFACE_NULL << GEN6_3DSTATE_DEPTH_BUFFER_TYPE_SHIFT | |
GEN6_DEPTHFORMAT_D32_FLOAT << GEN6_3DSTATE_DEPTH_BUFFER_FORMAT_SHIFT); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(GEN6_3DSTATE_CLEAR_PARAMS | (2 - 2)); |
OUT_BATCH(0); |
} |
static void |
gen6_emit_invariant(struct sna *sna) |
{ |
OUT_BATCH(GEN6_PIPELINE_SELECT | PIPELINE_SELECT_3D); |
OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE | (3 - 2)); |
OUT_BATCH(GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER | |
GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1); /* 1 sample/pixel */ |
OUT_BATCH(0); |
OUT_BATCH(GEN6_3DSTATE_SAMPLE_MASK | (2 - 2)); |
OUT_BATCH(1); |
gen6_emit_urb(sna); |
gen6_emit_state_base_address(sna); |
gen6_emit_viewports(sna); |
gen6_emit_vs(sna); |
gen6_emit_gs(sna); |
gen6_emit_clip(sna); |
gen6_emit_wm_constants(sna); |
gen6_emit_null_depth_buffer(sna); |
sna->render_state.gen6.needs_invariant = FALSE; |
} |
static bool |
gen6_emit_cc(struct sna *sna, |
int op, bool has_component_alpha, uint32_t dst_format) |
{ |
struct gen6_render_state *render = &sna->render_state.gen6; |
uint32_t blend; |
blend = gen6_get_blend(op, has_component_alpha, dst_format); |
DBG(("%s(op=%d, ca=%d, format=%x): new=%x, current=%x\n", |
__FUNCTION__, |
op, has_component_alpha, dst_format, |
blend, render->blend)); |
if (render->blend == blend) |
return op <= PictOpSrc; |
OUT_BATCH(GEN6_3DSTATE_CC_STATE_POINTERS | (4 - 2)); |
OUT_BATCH((render->cc_blend + blend) | 1); |
if (render->blend == (unsigned)-1) { |
OUT_BATCH(1); |
OUT_BATCH(1); |
} else { |
OUT_BATCH(0); |
OUT_BATCH(0); |
} |
render->blend = blend; |
return op <= PictOpSrc; |
} |
static void |
gen6_emit_sampler(struct sna *sna, uint32_t state) |
{ |
assert(state < |
2 * sizeof(struct gen6_sampler_state) * |
FILTER_COUNT * EXTEND_COUNT * |
FILTER_COUNT * EXTEND_COUNT); |
if (sna->render_state.gen6.samplers == state) |
return; |
sna->render_state.gen6.samplers = state; |
OUT_BATCH(GEN6_3DSTATE_SAMPLER_STATE_POINTERS | |
GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS | |
(4 - 2)); |
OUT_BATCH(0); /* VS */ |
OUT_BATCH(0); /* GS */ |
OUT_BATCH(sna->render_state.gen6.wm_state + state); |
} |
static void |
gen6_emit_sf(struct sna *sna, Bool has_mask) |
{ |
int num_sf_outputs = has_mask ? 2 : 1; |
if (sna->render_state.gen6.num_sf_outputs == num_sf_outputs) |
return; |
DBG(("%s: num_sf_outputs=%d, read_length=%d, read_offset=%d\n", |
__FUNCTION__, num_sf_outputs, 1, 0)); |
sna->render_state.gen6.num_sf_outputs = num_sf_outputs; |
OUT_BATCH(GEN6_3DSTATE_SF | (20 - 2)); |
OUT_BATCH(num_sf_outputs << GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT | |
1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT | |
1 << GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT); |
OUT_BATCH(0); |
OUT_BATCH(GEN6_3DSTATE_SF_CULL_NONE); |
OUT_BATCH(2 << GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT); /* DW4 */ |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); /* DW9 */ |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); /* DW14 */ |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); /* DW19 */ |
} |
static void |
gen6_emit_wm(struct sna *sna, unsigned int kernel, int nr_surfaces, int nr_inputs) |
{ |
if (sna->render_state.gen6.kernel == kernel) |
return; |
sna->render_state.gen6.kernel = kernel; |
DBG(("%s: switching to %s\n", __FUNCTION__, wm_kernels[kernel].name)); |
OUT_BATCH(GEN6_3DSTATE_WM | (9 - 2)); |
OUT_BATCH(sna->render_state.gen6.wm_kernel[kernel]); |
OUT_BATCH(1 << GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT | |
nr_surfaces << GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT); |
OUT_BATCH(0); |
OUT_BATCH(6 << GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT); /* DW4 */ |
OUT_BATCH((40 - 1) << GEN6_3DSTATE_WM_MAX_THREADS_SHIFT | |
GEN6_3DSTATE_WM_DISPATCH_ENABLE | |
GEN6_3DSTATE_WM_16_DISPATCH_ENABLE); |
OUT_BATCH(nr_inputs << GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT | |
GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC); |
OUT_BATCH(0); |
OUT_BATCH(0); |
} |
static bool |
gen6_emit_binding_table(struct sna *sna, uint16_t offset) |
{ |
if (sna->render_state.gen6.surface_table == offset) |
return false; |
/* Binding table pointers */ |
OUT_BATCH(GEN6_3DSTATE_BINDING_TABLE_POINTERS | |
GEN6_3DSTATE_BINDING_TABLE_MODIFY_PS | |
(4 - 2)); |
OUT_BATCH(0); /* vs */ |
OUT_BATCH(0); /* gs */ |
/* Only the PS uses the binding table */ |
OUT_BATCH(offset*4); |
sna->render_state.gen6.surface_table = offset; |
return true; |
} |
static bool |
gen6_emit_drawing_rectangle(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
uint32_t limit = (op->dst.height - 1) << 16 | (op->dst.width - 1); |
uint32_t offset = (uint16_t)op->dst.y << 16 | (uint16_t)op->dst.x; |
assert(!too_large(op->dst.x, op->dst.y)); |
assert(!too_large(op->dst.width, op->dst.height)); |
if (sna->render_state.gen6.drawrect_limit == limit && |
sna->render_state.gen6.drawrect_offset == offset) |
return false; |
/* [DevSNB-C+{W/A}] Before any depth stall flush (including those |
* produced by non-pipelined state commands), software needs to first |
* send a PIPE_CONTROL with no bits set except Post-Sync Operation != |
* 0. |
* |
* [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent |
* BEFORE the pipe-control with a post-sync op and no write-cache |
* flushes. |
*/ |
OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2)); |
OUT_BATCH(GEN6_PIPE_CONTROL_CS_STALL | |
GEN6_PIPE_CONTROL_STALL_AT_SCOREBOARD); |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2)); |
OUT_BATCH(GEN6_PIPE_CONTROL_WRITE_TIME); |
OUT_BATCH(sna->render_state.gen6.general_bo->gaddr+64); |
OUT_BATCH(0); |
OUT_BATCH(GEN6_3DSTATE_DRAWING_RECTANGLE | (4 - 2)); |
OUT_BATCH(0); |
OUT_BATCH(limit); |
OUT_BATCH(offset); |
sna->render_state.gen6.drawrect_offset = offset; |
sna->render_state.gen6.drawrect_limit = limit; |
return true; |
} |
static void |
gen6_emit_vertex_elements(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
/* |
* vertex data in vertex buffer |
* position: (x, y) |
* texture coordinate 0: (u0, v0) if (is_affine is TRUE) else (u0, v0, w0) |
* texture coordinate 1 if (has_mask is TRUE): same as above |
*/ |
struct gen6_render_state *render = &sna->render_state.gen6; |
int nelem = op->mask.bo ? 2 : 1; |
int selem = op->is_affine ? 2 : 3; |
uint32_t w_component; |
uint32_t src_format; |
int id = op->u.gen6.ve_id; |
if (render->ve_id == id) |
return; |
render->ve_id = id; |
if (op->is_affine) { |
src_format = GEN6_SURFACEFORMAT_R32G32_FLOAT; |
w_component = GEN6_VFCOMPONENT_STORE_1_FLT; |
} else { |
src_format = GEN6_SURFACEFORMAT_R32G32B32_FLOAT; |
w_component = GEN6_VFCOMPONENT_STORE_SRC; |
} |
/* The VUE layout |
* dword 0-3: pad (0.0, 0.0, 0.0, 0.0) |
* dword 4-7: position (x, y, 1.0, 1.0), |
* dword 8-11: texture coordinate 0 (u0, v0, w0, 1.0) |
* dword 12-15: texture coordinate 1 (u1, v1, w1, 1.0) |
* |
* dword 4-15 are fetched from vertex buffer |
*/ |
OUT_BATCH(GEN6_3DSTATE_VERTEX_ELEMENTS | |
((2 * (2 + nelem)) + 1 - 2)); |
OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID | |
GEN6_SURFACEFORMAT_R32G32B32A32_FLOAT << VE0_FORMAT_SHIFT | |
0 << VE0_OFFSET_SHIFT); |
OUT_BATCH(GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_0_SHIFT | |
GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_1_SHIFT | |
GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_2_SHIFT | |
GEN6_VFCOMPONENT_STORE_0 << VE1_VFCOMPONENT_3_SHIFT); |
/* x,y */ |
OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID | |
GEN6_SURFACEFORMAT_R16G16_SSCALED << VE0_FORMAT_SHIFT | |
0 << VE0_OFFSET_SHIFT); /* offsets vb in bytes */ |
OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT | |
GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT | |
GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_2_SHIFT | |
GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT); |
/* u0, v0, w0 */ |
OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID | |
src_format << VE0_FORMAT_SHIFT | |
4 << VE0_OFFSET_SHIFT); /* offset vb in bytes */ |
OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT | |
GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT | |
w_component << VE1_VFCOMPONENT_2_SHIFT | |
GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT); |
/* u1, v1, w1 */ |
if (op->mask.bo) { |
OUT_BATCH(id << VE0_VERTEX_BUFFER_INDEX_SHIFT | VE0_VALID | |
src_format << VE0_FORMAT_SHIFT | |
((1 + selem) * 4) << VE0_OFFSET_SHIFT); /* vb offset in bytes */ |
OUT_BATCH(GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_0_SHIFT | |
GEN6_VFCOMPONENT_STORE_SRC << VE1_VFCOMPONENT_1_SHIFT | |
w_component << VE1_VFCOMPONENT_2_SHIFT | |
GEN6_VFCOMPONENT_STORE_1_FLT << VE1_VFCOMPONENT_3_SHIFT); |
} |
} |
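/* Editor's sketch: the per-vertex layout these elements describe for the |
 * affine two-channel case (position plus two texcoord sets), matching the |
 * R16G16_SSCALED and R32G32_FLOAT formats programmed above. The struct |
 * name is illustrative; the offsets follow the VE0_OFFSET_SHIFT values 0, |
 * 4 and (1 + selem) * 4 = 12. |
 */ |
struct gen6_affine_vertex { |
	int16_t x, y;	/* element 1, offset 0 */ |
	float u0, v0;	/* element 2, offset 4 */ |
	float u1, v1;	/* element 3, offset 12, only when op->mask.bo */ |
} __attribute__((packed)); |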
static void |
gen6_emit_flush(struct sna *sna) |
{ |
OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2)); |
OUT_BATCH(GEN6_PIPE_CONTROL_WC_FLUSH | |
GEN6_PIPE_CONTROL_TC_FLUSH | |
GEN6_PIPE_CONTROL_CS_STALL); |
OUT_BATCH(0); |
OUT_BATCH(0); |
} |
static void |
gen6_emit_state(struct sna *sna, |
const struct sna_composite_op *op, |
uint16_t wm_binding_table) |
{ |
bool need_stall = wm_binding_table & 1; |
if (gen6_emit_cc(sna, op->op, op->has_component_alpha, op->dst.format)) |
need_stall = false; |
gen6_emit_sampler(sna, |
SAMPLER_OFFSET(op->src.filter, |
op->src.repeat, |
op->mask.filter, |
op->mask.repeat)); |
gen6_emit_sf(sna, op->mask.bo != NULL); |
gen6_emit_wm(sna, |
op->u.gen6.wm_kernel, |
op->u.gen6.nr_surfaces, |
op->u.gen6.nr_inputs); |
gen6_emit_vertex_elements(sna, op); |
need_stall |= gen6_emit_binding_table(sna, wm_binding_table & ~1); |
if (gen6_emit_drawing_rectangle(sna, op)) |
need_stall = false; |
// if (kgem_bo_is_dirty(op->src.bo) || kgem_bo_is_dirty(op->mask.bo)) { |
gen6_emit_flush(sna); |
kgem_clear_dirty(&sna->kgem); |
kgem_bo_mark_dirty(op->dst.bo); |
need_stall = false; |
// } |
if (need_stall) { |
OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2)); |
OUT_BATCH(GEN6_PIPE_CONTROL_CS_STALL | |
GEN6_PIPE_CONTROL_STALL_AT_SCOREBOARD); |
OUT_BATCH(0); |
OUT_BATCH(0); |
} |
} |
static void gen6_magic_ca_pass(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
struct gen6_render_state *state = &sna->render_state.gen6; |
if (!op->need_magic_ca_pass) |
return; |
DBG(("%s: CA fixup (%d -> %d)\n", __FUNCTION__, |
sna->render.vertex_start, sna->render.vertex_index)); |
gen6_emit_flush(sna); |
gen6_emit_cc(sna, PictOpAdd, TRUE, op->dst.format); |
gen6_emit_wm(sna, |
gen6_choose_composite_kernel(PictOpAdd, |
TRUE, TRUE, |
op->is_affine), |
3, 2); |
OUT_BATCH(GEN6_3DPRIMITIVE | |
GEN6_3DPRIMITIVE_VERTEX_SEQUENTIAL | |
_3DPRIM_RECTLIST << GEN6_3DPRIMITIVE_TOPOLOGY_SHIFT | |
0 << 9 | |
4); |
OUT_BATCH(sna->render.vertex_index - sna->render.vertex_start); |
OUT_BATCH(sna->render.vertex_start); |
OUT_BATCH(1); /* single instance */ |
OUT_BATCH(0); /* start instance location */ |
OUT_BATCH(0); /* index buffer offset, ignored */ |
state->last_primitive = sna->kgem.nbatch; |
} |
static void gen6_vertex_flush(struct sna *sna) |
{ |
assert(sna->render_state.gen6.vertex_offset); |
DBG(("%s[%x] = %d\n", __FUNCTION__, |
4*sna->render_state.gen6.vertex_offset, |
sna->render.vertex_index - sna->render.vertex_start)); |
sna->kgem.batch[sna->render_state.gen6.vertex_offset] = |
sna->render.vertex_index - sna->render.vertex_start; |
sna->render_state.gen6.vertex_offset = 0; |
} |
static int gen6_vertex_finish(struct sna *sna) |
{ |
struct kgem_bo *bo; |
unsigned int i; |
DBG(("%s: used=%d / %d\n", __FUNCTION__, |
sna->render.vertex_used, sna->render.vertex_size)); |
assert(sna->render.vertex_used); |
/* Note: we only need dword alignment (currently) */ |
/* |
bo = sna->render.vbo; |
if (bo) { |
for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) { |
if (sna->render.vertex_reloc[i]) { |
DBG(("%s: reloc[%d] = %d\n", __FUNCTION__, |
i, sna->render.vertex_reloc[i])); |
sna->kgem.batch[sna->render.vertex_reloc[i]] = |
kgem_add_reloc(&sna->kgem, |
sna->render.vertex_reloc[i], |
bo, |
I915_GEM_DOMAIN_VERTEX << 16, |
0); |
sna->kgem.batch[sna->render.vertex_reloc[i]+1] = |
kgem_add_reloc(&sna->kgem, |
sna->render.vertex_reloc[i]+1, |
bo, |
I915_GEM_DOMAIN_VERTEX << 16, |
0 + sna->render.vertex_used * 4 - 1); |
sna->render.vertex_reloc[i] = 0; |
} |
} |
sna->render.vertex_used = 0; |
sna->render.vertex_index = 0; |
sna->render_state.gen6.vb_id = 0; |
kgem_bo_destroy(&sna->kgem, bo); |
} |
*/ |
sna->render.vertices = NULL; |
sna->render.vbo = kgem_create_linear(&sna->kgem, 256*1024); |
if (sna->render.vbo) |
sna->render.vertices = kgem_bo_map__cpu(&sna->kgem, sna->render.vbo); |
if (sna->render.vertices == NULL) { |
kgem_bo_destroy(&sna->kgem, sna->render.vbo); |
sna->render.vbo = NULL; |
return 0; |
} |
// kgem_bo_sync__cpu(&sna->kgem, sna->render.vbo); |
if (sna->render.vertex_used) { |
DBG(("%s: copying initial buffer x %d to handle=%d\n", |
__FUNCTION__, |
sna->render.vertex_used, |
sna->render.vbo->handle)); |
memcpy(sna->render.vertices, |
sna->render.vertex_data, |
sizeof(float)*sna->render.vertex_used); |
} |
sna->render.vertex_size = 64 * 1024 - 1; |
return sna->render.vertex_size - sna->render.vertex_used; |
} |
static void gen6_vertex_close(struct sna *sna) |
{ |
struct kgem_bo *bo; |
unsigned int i, delta = 0; |
if (!sna->render.vertex_used) { |
assert(sna->render.vbo == NULL); |
assert(sna->render.vertices == sna->render.vertex_data); |
assert(sna->render.vertex_size == ARRAY_SIZE(sna->render.vertex_data)); |
return; |
} |
DBG(("%s: used=%d / %d\n", __FUNCTION__, |
sna->render.vertex_used, sna->render.vertex_size)); |
bo = sna->render.vbo; |
if (bo == NULL) { |
assert(sna->render.vertices == sna->render.vertex_data); |
assert(sna->render.vertex_used < ARRAY_SIZE(sna->render.vertex_data)); |
if (sna->kgem.nbatch + sna->render.vertex_used <= sna->kgem.surface) { |
DBG(("%s: copy to batch: %d @ %d\n", __FUNCTION__, |
sna->render.vertex_used, sna->kgem.nbatch)); |
memcpy(sna->kgem.batch + sna->kgem.nbatch, |
sna->render.vertex_data, |
sna->render.vertex_used * 4); |
delta = sna->kgem.nbatch * 4; |
bo = NULL; |
sna->kgem.nbatch += sna->render.vertex_used; |
} else { |
bo = kgem_create_linear(&sna->kgem, 4*sna->render.vertex_used); |
if (bo && !kgem_bo_write(&sna->kgem, bo, |
sna->render.vertex_data, |
4*sna->render.vertex_used)) { |
kgem_bo_destroy(&sna->kgem, bo); |
goto reset; |
} |
DBG(("%s: new vbo: %d\n", __FUNCTION__, |
sna->render.vertex_used)); |
} |
} |
for (i = 0; i < ARRAY_SIZE(sna->render.vertex_reloc); i++) { |
if (sna->render.vertex_reloc[i]) { |
DBG(("%s: reloc[%d] = %d\n", __FUNCTION__, |
i, sna->render.vertex_reloc[i])); |
sna->kgem.batch[sna->render.vertex_reloc[i]] = |
sna->kgem.batch_obj->gtt_offset+delta+ |
sna->kgem.batch_idx*4096; |
sna->kgem.batch[sna->render.vertex_reloc[i]+1] = |
sna->kgem.batch_obj->gtt_offset+delta+ |
sna->kgem.batch_idx*4096+ |
sna->render.vertex_used * 4 - 1; |
sna->render.vertex_reloc[i] = 0; |
} |
} |
// if (bo) |
// kgem_bo_destroy(&sna->kgem, bo); |
reset: |
sna->render.vertex_used = 0; |
sna->render.vertex_index = 0; |
sna->render_state.gen6.vb_id = 0; |
sna->render.vbo = NULL; |
sna->render.vertices = sna->render.vertex_data; |
sna->render.vertex_size = ARRAY_SIZE(sna->render.vertex_data); |
} |
typedef struct gen6_surface_state_padded { |
struct gen6_surface_state state; |
char pad[32 - sizeof(struct gen6_surface_state)]; |
} gen6_surface_state_padded; |
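/* Annotation: each surface-state entry is padded out to 32 bytes so the
 * surface allocator below can step through the batch in uniform
 * sizeof(struct gen6_surface_state_padded) units; presumably this also
 * satisfies the hardware alignment rules for SURFACE_STATE. */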
static void null_create(struct sna_static_stream *stream) |
{ |
/* A bunch of zeros useful for legacy border color and depth-stencil */ |
sna_static_stream_map(stream, 64, 64); |
} |
static void scratch_create(struct sna_static_stream *stream) |
{ |
/* 64 bytes of scratch space for random writes, such as |
* the pipe-control w/a. |
*/ |
sna_static_stream_map(stream, 64, 64); |
} |
static void |
sampler_state_init(struct gen6_sampler_state *sampler_state, |
sampler_filter_t filter, |
sampler_extend_t extend) |
{ |
sampler_state->ss0.lod_preclamp = 1; /* GL mode */ |
/* We use the legacy mode to get the semantics specified by |
* the Render extension. */ |
sampler_state->ss0.border_color_mode = GEN6_BORDER_COLOR_MODE_LEGACY; |
switch (filter) { |
default: |
case SAMPLER_FILTER_NEAREST: |
sampler_state->ss0.min_filter = GEN6_MAPFILTER_NEAREST; |
sampler_state->ss0.mag_filter = GEN6_MAPFILTER_NEAREST; |
break; |
case SAMPLER_FILTER_BILINEAR: |
sampler_state->ss0.min_filter = GEN6_MAPFILTER_LINEAR; |
sampler_state->ss0.mag_filter = GEN6_MAPFILTER_LINEAR; |
break; |
} |
switch (extend) { |
default: |
case SAMPLER_EXTEND_NONE: |
sampler_state->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER; |
sampler_state->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER; |
sampler_state->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP_BORDER; |
break; |
case SAMPLER_EXTEND_REPEAT: |
sampler_state->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_WRAP; |
sampler_state->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_WRAP; |
sampler_state->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_WRAP; |
break; |
case SAMPLER_EXTEND_PAD: |
sampler_state->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP; |
sampler_state->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP; |
sampler_state->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP; |
break; |
case SAMPLER_EXTEND_REFLECT: |
sampler_state->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_MIRROR; |
sampler_state->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_MIRROR; |
sampler_state->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_MIRROR; |
break; |
} |
} |
static uint32_t gen6_create_cc_viewport(struct sna_static_stream *stream) |
{ |
struct gen6_cc_viewport vp; |
vp.min_depth = -1.e35; |
vp.max_depth = 1.e35; |
return sna_static_stream_add(stream, &vp, sizeof(vp), 32); |
} |
#if 0 |
static uint32_t gen6_get_card_format(PictFormat format) |
{ |
unsigned int i; |
for (i = 0; i < ARRAY_SIZE(gen6_tex_formats); i++) { |
if (gen6_tex_formats[i].pict_fmt == format) |
return gen6_tex_formats[i].card_fmt; |
} |
return -1; |
} |
#endif |
static uint32_t |
gen6_tiling_bits(uint32_t tiling) |
{ |
return 0; |
/* |
switch (tiling) { |
default: assert(0); |
case I915_TILING_NONE: return 0; |
case I915_TILING_X: return GEN6_SURFACE_TILED; |
case I915_TILING_Y: return GEN6_SURFACE_TILED | GEN6_SURFACE_TILED_Y; |
} |
*/ |
} |
/**
 * Sets up the common fields of a surface state entry for the given bo
 * and writes it into the batch's surface state area.
 */
static int |
gen6_bind_bo(struct sna *sna, |
struct kgem_bo *bo, |
uint32_t width, |
uint32_t height, |
uint32_t format, |
Bool is_dst) |
{ |
uint32_t *ss; |
uint32_t domains; |
uint16_t offset; |
/* After the first bind, we manage the cache domains within the batch */ |
if (is_dst) { |
domains = I915_GEM_DOMAIN_RENDER << 16 |I915_GEM_DOMAIN_RENDER; |
// kgem_bo_mark_dirty(bo); |
} else |
domains = I915_GEM_DOMAIN_SAMPLER << 16; |
// offset = kgem_bo_get_binding(bo, format); |
// if (offset) { |
// DBG(("[%x] bo(handle=%x), format=%d, reuse %s binding\n", |
// offset, bo->handle, format, |
// domains & 0xffff ? "render" : "sampler")); |
// return offset; |
// } |
sna->kgem.surface -=
sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t);
offset = sna->kgem.surface * sizeof(uint32_t);
ss = sna->kgem.batch + sna->kgem.surface; |
ss[0] = (GEN6_SURFACE_2D << GEN6_SURFACE_TYPE_SHIFT | |
GEN6_SURFACE_BLEND_ENABLED | |
format << GEN6_SURFACE_FORMAT_SHIFT); |
ss[1] = bo->gaddr; |
ss[2] = ((width - 1) << GEN6_SURFACE_WIDTH_SHIFT | |
(height - 1) << GEN6_SURFACE_HEIGHT_SHIFT); |
assert(bo->pitch <= (1 << 18)); |
ss[3] = (gen6_tiling_bits(0) | |
(bo->pitch - 1) << GEN6_SURFACE_PITCH_SHIFT); |
ss[4] = 0; |
ss[5] = 0; |
// kgem_bo_set_binding(bo, format, offset); |
DBG(("[%x] bind bo(handle=%d, addr=%d), format=%d, width=%d, height=%d, pitch=%d, tiling=%d -> %s\n", |
offset, bo->handle, ss[1], |
format, width, height, bo->pitch, bo->tiling, |
domains & 0xffff ? "render" : "sampler")); |
return offset; |
} |
static void gen6_emit_vertex_buffer(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
int id = op->u.gen6.ve_id; |
OUT_BATCH(GEN6_3DSTATE_VERTEX_BUFFERS | 3); |
OUT_BATCH(id << VB0_BUFFER_INDEX_SHIFT | VB0_VERTEXDATA | |
4*op->floats_per_vertex << VB0_BUFFER_PITCH_SHIFT); |
sna->render.vertex_reloc[id] = sna->kgem.nbatch; |
OUT_BATCH(0); |
OUT_BATCH(0); |
OUT_BATCH(0); |
sna->render_state.gen6.vb_id |= 1 << id; |
} |
static void gen6_emit_primitive(struct sna *sna) |
{ |
if (sna->kgem.nbatch == sna->render_state.gen6.last_primitive) { |
DBG(("%s: continuing previous primitive, start=%d, index=%d\n", |
__FUNCTION__, |
sna->render.vertex_start, |
sna->render.vertex_index)); |
sna->render_state.gen6.vertex_offset = sna->kgem.nbatch - 5; |
return; |
} |
OUT_BATCH(GEN6_3DPRIMITIVE | |
GEN6_3DPRIMITIVE_VERTEX_SEQUENTIAL | |
_3DPRIM_RECTLIST << GEN6_3DPRIMITIVE_TOPOLOGY_SHIFT | |
0 << 9 | |
4); |
sna->render_state.gen6.vertex_offset = sna->kgem.nbatch; |
OUT_BATCH(0); /* vertex count, to be filled in later */ |
OUT_BATCH(sna->render.vertex_index); |
OUT_BATCH(1); /* single instance */ |
OUT_BATCH(0); /* start instance location */ |
OUT_BATCH(0); /* index buffer offset, ignored */ |
sna->render.vertex_start = sna->render.vertex_index; |
DBG(("%s: started new primitive: index=%d\n", |
__FUNCTION__, sna->render.vertex_start)); |
sna->render_state.gen6.last_primitive = sna->kgem.nbatch; |
} |
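/* Annotation: the vertex count above is emitted as 0 and patched later
 * through render_state.gen6.vertex_offset (see gen6_vertex_flush); if
 * nothing has been emitted since the last 3DPRIMITIVE, the packet is
 * reused by rewinding vertex_offset to its count dword (nbatch - 5). */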
static bool gen6_rectangle_begin(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
int id = 1 << op->u.gen6.ve_id; |
int ndwords; |
ndwords = op->need_magic_ca_pass ? 60 : 6; |
if ((sna->render_state.gen6.vb_id & id) == 0) |
ndwords += 5; |
if (!kgem_check_batch(&sna->kgem, ndwords)) |
return false; |
if ((sna->render_state.gen6.vb_id & id) == 0) |
gen6_emit_vertex_buffer(sna, op); |
gen6_emit_primitive(sna); |
return true; |
} |
static int gen6_get_rectangles__flush(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
if (sna->render_state.gen6.vertex_offset) { |
gen6_vertex_flush(sna); |
gen6_magic_ca_pass(sna, op); |
} |
if (!kgem_check_batch(&sna->kgem, op->need_magic_ca_pass ? 65 : 5)) |
return 0; |
if (sna->kgem.nexec > KGEM_EXEC_SIZE(&sna->kgem) - 1) |
return 0; |
if (sna->kgem.nreloc > KGEM_RELOC_SIZE(&sna->kgem) - 2) |
return 0; |
return gen6_vertex_finish(sna); |
} |
inline static int gen6_get_rectangles(struct sna *sna, |
const struct sna_composite_op *op, |
int want) |
{ |
int rem = vertex_space(sna); |
if (rem < op->floats_per_rect) { |
DBG(("flushing vbo for %s: %d < %d\n", |
__FUNCTION__, rem, op->floats_per_rect)); |
rem = gen6_get_rectangles__flush(sna, op); |
if (rem == 0) |
return 0; |
} |
if (sna->render_state.gen6.vertex_offset == 0 && |
!gen6_rectangle_begin(sna, op)) |
return 0; |
if (want > 1 && want * op->floats_per_rect > rem) |
want = rem / op->floats_per_rect; |
assert(want > 0); |
sna->render.vertex_index += 3*want; |
return want; |
} |
inline static uint32_t *gen6_composite_get_binding_table(struct sna *sna, |
uint16_t *offset) |
{ |
uint32_t *table; |
sna->kgem.surface -= |
sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t); |
/* Clear all surplus entries to zero in case of prefetch */ |
table = memset(sna->kgem.batch + sna->kgem.surface, |
0, sizeof(struct gen6_surface_state_padded)); |
DBG(("%s(%x)\n", __FUNCTION__, 4*sna->kgem.surface)); |
*offset = sna->kgem.surface; |
return table; |
} |
static uint32_t |
gen6_choose_composite_vertex_buffer(const struct sna_composite_op *op) |
{ |
int has_mask = op->mask.bo != NULL; |
int is_affine = op->is_affine; |
return has_mask << 1 | is_affine; |
} |
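/* Annotation: the resulting ve_id is a 2-bit key --
 *   0: no mask, projective   1: no mask, affine
 *   2: mask, projective      3: mask, affine
 * -- used both as the vertex-buffer slot and to index vertex_reloc[]. */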
static void |
gen6_get_batch(struct sna *sna) |
{ |
kgem_set_mode(&sna->kgem, KGEM_RENDER); |
/* |
if (!kgem_check_batch_with_surfaces(&sna->kgem, 150, 4)) { |
DBG(("%s: flushing batch: %d < %d+%d\n", |
__FUNCTION__, sna->kgem.surface - sna->kgem.nbatch, |
150, 4*8)); |
kgem_submit(&sna->kgem); |
_kgem_set_mode(&sna->kgem, KGEM_RENDER); |
} |
*/ |
if (sna->render_state.gen6.needs_invariant) |
gen6_emit_invariant(sna); |
} |
static void gen6_emit_composite_state(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
uint32_t *binding_table; |
uint16_t offset; |
bool dirty; |
gen6_get_batch(sna); |
dirty = FALSE; |
binding_table = gen6_composite_get_binding_table(sna, &offset); |
binding_table[0] = |
gen6_bind_bo(sna, |
op->dst.bo, op->dst.width, op->dst.height, |
op->dst.format, |
TRUE); |
binding_table[1] = |
gen6_bind_bo(sna, |
op->src.bo, op->src.width, op->src.height, |
op->src.card_format, |
FALSE); |
if (op->mask.bo) { |
binding_table[2] = |
gen6_bind_bo(sna, |
op->mask.bo, |
op->mask.width, |
op->mask.height, |
op->mask.card_format, |
FALSE); |
} |
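/* Annotation: if the table just written matches the previous one, give
 * back the reserved space and reuse the existing surface table; the
 * comparison below checks the dst/src entries as a single uint64_t and
 * the optional mask entry separately. */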
if (sna->kgem.surface == offset && |
*(uint64_t *)(sna->kgem.batch + sna->render_state.gen6.surface_table) == *(uint64_t*)binding_table && |
(op->mask.bo == NULL || |
sna->kgem.batch[sna->render_state.gen6.surface_table+2] == binding_table[2])) { |
sna->kgem.surface += sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t); |
offset = sna->render_state.gen6.surface_table; |
} |
gen6_emit_state(sna, op, offset | dirty); |
} |
static void |
gen6_align_vertex(struct sna *sna, const struct sna_composite_op *op) |
{ |
assert (sna->render_state.gen6.vertex_offset == 0); |
if (op->floats_per_vertex != sna->render_state.gen6.floats_per_vertex) { |
if (sna->render.vertex_size - sna->render.vertex_used < 2*op->floats_per_rect) |
/* XXX propagate failure */ |
gen6_vertex_finish(sna); |
DBG(("aligning vertex: was %d, now %d floats per vertex, %d->%d\n", |
sna->render_state.gen6.floats_per_vertex, |
op->floats_per_vertex, |
sna->render.vertex_index, |
(sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex)); |
sna->render.vertex_index = (sna->render.vertex_used + op->floats_per_vertex - 1) / op->floats_per_vertex; |
sna->render.vertex_used = sna->render.vertex_index * op->floats_per_vertex; |
sna->render_state.gen6.floats_per_vertex = op->floats_per_vertex; |
} |
} |
#ifndef MAX |
#define MAX(a,b) ((a) > (b) ? (a) : (b)) |
#endif |
static uint32_t |
gen6_composite_create_blend_state(struct sna_static_stream *stream) |
{ |
char *base, *ptr; |
int src, dst; |
base = sna_static_stream_map(stream, |
GEN6_BLENDFACTOR_COUNT * GEN6_BLENDFACTOR_COUNT * GEN6_BLEND_STATE_PADDED_SIZE, |
64); |
ptr = base; |
for (src = 0; src < GEN6_BLENDFACTOR_COUNT; src++) { |
for (dst= 0; dst < GEN6_BLENDFACTOR_COUNT; dst++) { |
struct gen6_blend_state *blend = |
(struct gen6_blend_state *)ptr; |
blend->blend0.dest_blend_factor = dst; |
blend->blend0.source_blend_factor = src; |
blend->blend0.blend_func = GEN6_BLENDFUNCTION_ADD; |
blend->blend0.blend_enable = |
!(dst == GEN6_BLENDFACTOR_ZERO && src == GEN6_BLENDFACTOR_ONE); |
blend->blend1.post_blend_clamp_enable = 1; |
blend->blend1.pre_blend_clamp_enable = 1; |
ptr += GEN6_BLEND_STATE_PADDED_SIZE; |
} |
} |
return sna_static_stream_offsetof(stream, base); |
} |
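/* Annotation: the stream holds a GEN6_BLENDFACTOR_COUNT x
 * GEN6_BLENDFACTOR_COUNT grid of blend states, so a (src, dst) factor
 * pair maps to a fixed offset from cc_blend. A minimal lookup sketch
 * (illustrative only; gen6_blend_offset is a hypothetical helper
 * mirroring the loop order above): */
#if 0
static uint32_t gen6_blend_offset(uint32_t base, int src_factor, int dst_factor)
{
/* row-major: src is the outer loop above, dst the inner one */
return base + (src_factor * GEN6_BLENDFACTOR_COUNT + dst_factor) *
GEN6_BLEND_STATE_PADDED_SIZE;
}
#endif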
#if 0 |
static uint32_t gen6_bind_video_source(struct sna *sna, |
struct kgem_bo *src_bo, |
uint32_t src_offset, |
int src_width, |
int src_height, |
int src_pitch, |
uint32_t src_surf_format) |
{ |
struct gen6_surface_state *ss; |
sna->kgem.surface -= sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t); |
ss = memset(sna->kgem.batch + sna->kgem.surface, 0, sizeof(*ss)); |
ss->ss0.surface_type = GEN6_SURFACE_2D; |
ss->ss0.surface_format = src_surf_format; |
ss->ss1.base_addr = |
kgem_add_reloc(&sna->kgem, |
sna->kgem.surface + 1, |
src_bo, |
I915_GEM_DOMAIN_SAMPLER << 16, |
src_offset); |
ss->ss2.width = src_width - 1; |
ss->ss2.height = src_height - 1; |
ss->ss3.pitch = src_pitch - 1; |
return sna->kgem.surface * sizeof(uint32_t); |
} |
static void gen6_emit_video_state(struct sna *sna, |
struct sna_composite_op *op, |
struct sna_video_frame *frame) |
{ |
uint32_t src_surf_format; |
uint32_t src_surf_base[6]; |
int src_width[6]; |
int src_height[6]; |
int src_pitch[6]; |
uint32_t *binding_table; |
uint16_t offset; |
bool dirty; |
int n_src, n; |
gen6_get_batch(sna); |
dirty = kgem_bo_is_dirty(op->dst.bo); |
src_surf_base[0] = 0; |
src_surf_base[1] = 0; |
src_surf_base[2] = frame->VBufOffset; |
src_surf_base[3] = frame->VBufOffset; |
src_surf_base[4] = frame->UBufOffset; |
src_surf_base[5] = frame->UBufOffset; |
if (is_planar_fourcc(frame->id)) { |
src_surf_format = GEN6_SURFACEFORMAT_R8_UNORM; |
src_width[1] = src_width[0] = frame->width; |
src_height[1] = src_height[0] = frame->height; |
src_pitch[1] = src_pitch[0] = frame->pitch[1]; |
src_width[4] = src_width[5] = src_width[2] = src_width[3] = |
frame->width / 2; |
src_height[4] = src_height[5] = src_height[2] = src_height[3] = |
frame->height / 2; |
src_pitch[4] = src_pitch[5] = src_pitch[2] = src_pitch[3] = |
frame->pitch[0]; |
n_src = 6; |
} else { |
if (frame->id == FOURCC_UYVY) |
src_surf_format = GEN6_SURFACEFORMAT_YCRCB_SWAPY; |
else |
src_surf_format = GEN6_SURFACEFORMAT_YCRCB_NORMAL; |
src_width[0] = frame->width; |
src_height[0] = frame->height; |
src_pitch[0] = frame->pitch[0]; |
n_src = 1; |
} |
binding_table = gen6_composite_get_binding_table(sna, &offset); |
binding_table[0] = |
gen6_bind_bo(sna, |
op->dst.bo, op->dst.width, op->dst.height, |
gen6_get_dest_format(op->dst.format), |
TRUE); |
for (n = 0; n < n_src; n++) { |
binding_table[1+n] = |
gen6_bind_video_source(sna, |
frame->bo, |
src_surf_base[n], |
src_width[n], |
src_height[n], |
src_pitch[n], |
src_surf_format); |
} |
gen6_emit_state(sna, op, offset | dirty); |
} |
static Bool |
gen6_render_video(struct sna *sna, |
struct sna_video *video, |
struct sna_video_frame *frame, |
RegionPtr dstRegion, |
short src_w, short src_h, |
short drw_w, short drw_h, |
PixmapPtr pixmap) |
{ |
struct sna_composite_op tmp; |
int nbox, dxo, dyo, pix_xoff, pix_yoff; |
float src_scale_x, src_scale_y; |
struct sna_pixmap *priv; |
BoxPtr box; |
DBG(("%s: src=(%d, %d), dst=(%d, %d), %dx[(%d, %d), (%d, %d)...]\n", |
__FUNCTION__, src_w, src_h, drw_w, drw_h, |
REGION_NUM_RECTS(dstRegion), |
REGION_EXTENTS(NULL, dstRegion)->x1, |
REGION_EXTENTS(NULL, dstRegion)->y1, |
REGION_EXTENTS(NULL, dstRegion)->x2, |
REGION_EXTENTS(NULL, dstRegion)->y2)); |
priv = sna_pixmap_force_to_gpu(pixmap, MOVE_READ | MOVE_WRITE); |
if (priv == NULL) |
return FALSE; |
memset(&tmp, 0, sizeof(tmp)); |
tmp.op = PictOpSrc; |
tmp.dst.pixmap = pixmap; |
tmp.dst.width = pixmap->drawable.width; |
tmp.dst.height = pixmap->drawable.height; |
tmp.dst.format = sna_render_format_for_depth(pixmap->drawable.depth); |
tmp.dst.bo = priv->gpu_bo; |
tmp.src.bo = frame->bo; |
tmp.src.filter = SAMPLER_FILTER_BILINEAR; |
tmp.src.repeat = SAMPLER_EXTEND_PAD; |
tmp.mask.bo = NULL; |
tmp.is_affine = TRUE; |
tmp.floats_per_vertex = 3; |
tmp.floats_per_rect = 9; |
if (is_planar_fourcc(frame->id)) { |
tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_VIDEO_PLANAR; |
tmp.u.gen6.nr_surfaces = 7; |
} else { |
tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_VIDEO_PACKED; |
tmp.u.gen6.nr_surfaces = 2; |
} |
tmp.u.gen6.nr_inputs = 1; |
tmp.u.gen6.ve_id = 1; |
kgem_set_mode(&sna->kgem, KGEM_RENDER); |
if (!kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)) { |
kgem_submit(&sna->kgem); |
assert(kgem_check_bo(&sna->kgem, tmp.dst.bo, frame->bo, NULL)); |
_kgem_set_mode(&sna->kgem, KGEM_RENDER); |
} |
gen6_emit_video_state(sna, &tmp, frame); |
gen6_align_vertex(sna, &tmp); |
/* Set up the offset for translating from the given region (in screen |
* coordinates) to the backing pixmap. |
*/ |
#ifdef COMPOSITE |
pix_xoff = -pixmap->screen_x + pixmap->drawable.x; |
pix_yoff = -pixmap->screen_y + pixmap->drawable.y; |
#else |
pix_xoff = 0; |
pix_yoff = 0; |
#endif |
dxo = dstRegion->extents.x1; |
dyo = dstRegion->extents.y1; |
/* Use normalized texture coordinates */ |
src_scale_x = ((float)src_w / frame->width) / (float)drw_w; |
src_scale_y = ((float)src_h / frame->height) / (float)drw_h; |
box = REGION_RECTS(dstRegion); |
nbox = REGION_NUM_RECTS(dstRegion); |
while (nbox--) { |
BoxRec r; |
r.x1 = box->x1 + pix_xoff; |
r.x2 = box->x2 + pix_xoff; |
r.y1 = box->y1 + pix_yoff; |
r.y2 = box->y2 + pix_yoff; |
if (unlikely(!gen6_get_rectangles(sna, &tmp, 1))) { |
_kgem_submit(&sna->kgem); |
gen6_emit_video_state(sna, &tmp, frame); |
gen6_get_rectangles(sna, &tmp, 1); |
} |
OUT_VERTEX(r.x2, r.y2); |
OUT_VERTEX_F((box->x2 - dxo) * src_scale_x); |
OUT_VERTEX_F((box->y2 - dyo) * src_scale_y); |
OUT_VERTEX(r.x1, r.y2); |
OUT_VERTEX_F((box->x1 - dxo) * src_scale_x); |
OUT_VERTEX_F((box->y2 - dyo) * src_scale_y); |
OUT_VERTEX(r.x1, r.y1); |
OUT_VERTEX_F((box->x1 - dxo) * src_scale_x); |
OUT_VERTEX_F((box->y1 - dyo) * src_scale_y); |
if (!DAMAGE_IS_ALL(priv->gpu_damage)) { |
sna_damage_add_box(&priv->gpu_damage, &r); |
sna_damage_subtract_box(&priv->cpu_damage, &r); |
} |
box++; |
} |
priv->clear = false; |
gen6_vertex_flush(sna); |
return TRUE; |
} |
#endif |
fastcall static void |
gen6_emit_composite_primitive_identity_source_mask(struct sna *sna, |
const struct sna_composite_op *op, |
const struct sna_composite_rectangles *r) |
{ |
union { |
struct sna_coordinate p; |
float f; |
} dst; |
float src_x, src_y; |
float msk_x, msk_y; |
float w, h; |
float *v; |
src_x = r->src.x + op->src.offset[0]; |
src_y = r->src.y + op->src.offset[1]; |
msk_x = r->mask.x + op->mask.offset[0]; |
msk_y = r->mask.y + op->mask.offset[1]; |
w = r->width; |
h = r->height; |
v = sna->render.vertices + sna->render.vertex_used; |
sna->render.vertex_used += 15; |
dst.p.x = r->dst.x + r->width; |
dst.p.y = r->dst.y + r->height; |
v[0] = dst.f; |
v[1] = (src_x + w) * op->src.scale[0]; |
v[2] = (src_y + h) * op->src.scale[1]; |
v[3] = (msk_x + w) * op->mask.scale[0]; |
v[4] = (msk_y + h) * op->mask.scale[1]; |
dst.p.x = r->dst.x; |
v[5] = dst.f; |
v[6] = src_x * op->src.scale[0]; |
v[7] = v[2]; |
v[8] = msk_x * op->mask.scale[0]; |
v[9] = v[4]; |
dst.p.y = r->dst.y; |
v[10] = dst.f; |
v[11] = v[6]; |
v[12] = src_y * op->src.scale[1]; |
v[13] = v[8]; |
v[14] = msk_y * op->mask.scale[1]; |
} |
fastcall static void |
gen6_render_composite_box(struct sna *sna, |
const struct sna_composite_op *op, |
const BoxRec *box) |
{ |
struct sna_composite_rectangles r; |
if (unlikely(!gen6_get_rectangles(sna, op, 1))) { |
// _kgem_submit(&sna->kgem); |
// gen6_emit_composite_state(sna, op); |
// gen6_get_rectangles(sna, op, 1); |
} |
DBG((" %s: (%d, %d), (%d, %d)\n", |
__FUNCTION__, |
box->x1, box->y1, box->x2, box->y2)); |
r.dst.x = box->x1; |
r.dst.y = box->y1; |
r.width = box->x2 - box->x1; |
r.height = box->y2 - box->y1; |
r.src = r.mask = r.dst; |
op->prim_emit(sna, op, &r); |
} |
static void gen6_render_composite_done(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
DBG(("%s\n", __FUNCTION__)); |
if (sna->render_state.gen6.vertex_offset) { |
gen6_vertex_flush(sna); |
gen6_magic_ca_pass(sna, op); |
} |
// if (op->mask.bo) |
// kgem_bo_destroy(&sna->kgem, op->mask.bo); |
// if (op->src.bo) |
// kgem_bo_destroy(&sna->kgem, op->src.bo); |
// sna_render_composite_redirect_done(sna, op); |
} |
static Bool |
gen6_render_composite(struct sna *sna, |
uint8_t op, |
bitmap_t *src, |
struct kgem_bo *src_bo, |
bitmap_t *mask, |
struct kgem_bo *mask_bo, |
bitmap_t *dst, |
struct kgem_bo *dst_bo, |
int16_t src_x, int16_t src_y, |
int16_t msk_x, int16_t msk_y, |
int16_t dst_x, int16_t dst_y, |
int16_t width, int16_t height, |
struct sna_composite_op *tmp) |
{ |
// if (op >= ARRAY_SIZE(gen6_blend_op)) |
// return FALSE; |
// ENTER(); |
DBG(("%s: %dx%d, current mode=%d\n", __FUNCTION__, |
width, height, sna->kgem.ring)); |
tmp->op = PictOpSrc; |
tmp->dst.bo = dst_bo; |
tmp->dst.width = dst->width; |
tmp->dst.height = dst->height; |
tmp->dst.format = GEN6_SURFACEFORMAT_B8G8R8A8_UNORM; |
tmp->src.bo = src_bo; |
tmp->src.card_format = GEN6_SURFACEFORMAT_B8G8R8A8_UNORM; |
tmp->src.width = src->width; |
tmp->src.height = src->height; |
tmp->src.scale[0] = 1.f/width; //src->width; |
tmp->src.scale[1] = 1.f/height; //src->height; |
tmp->src.filter = SAMPLER_FILTER_BILINEAR; |
tmp->src.repeat = SAMPLER_EXTEND_NONE; |
tmp->src.offset[0] = -dst_x; |
tmp->src.offset[1] = -dst_y; |
tmp->src.is_affine = TRUE; |
tmp->mask.bo = mask_bo; |
tmp->mask.card_format = GEN6_SURFACEFORMAT_A8_UNORM; |
tmp->mask.width = mask->width; |
tmp->mask.height = mask->height; |
tmp->mask.scale[0] = 1.f/mask->width; |
tmp->mask.scale[1] = 1.f/mask->height; |
tmp->mask.filter = SAMPLER_FILTER_NEAREST; |
tmp->mask.repeat = SAMPLER_EXTEND_NONE; |
tmp->mask.offset[0] = -dst_x; |
tmp->mask.offset[1] = -dst_y; |
tmp->mask.is_affine = TRUE; |
tmp->is_affine = TRUE; |
tmp->has_component_alpha = FALSE; |
tmp->need_magic_ca_pass = FALSE; |
tmp->prim_emit = gen6_emit_composite_primitive_identity_source_mask; |
tmp->floats_per_vertex = 5 + 2 * !tmp->is_affine; |
tmp->floats_per_rect = 3 * tmp->floats_per_vertex; |
tmp->u.gen6.wm_kernel = GEN6_WM_KERNEL_MASK; |
tmp->u.gen6.nr_surfaces = 2 + 1; |
tmp->u.gen6.nr_inputs = 1 + 1; |
tmp->u.gen6.ve_id = gen6_choose_composite_vertex_buffer(tmp); |
tmp->need_magic_ca_pass = TRUE; |
// tmp->blt = gen6_render_composite_blt; |
tmp->box = gen6_render_composite_box; |
// tmp->boxes = gen6_render_composite_boxes; |
tmp->done = gen6_render_composite_done; |
gen6_emit_composite_state(sna, tmp); |
gen6_align_vertex(sna, tmp); |
// LEAVE(); |
return TRUE; |
} |
static void |
gen6_emit_copy_state(struct sna *sna, |
const struct sna_composite_op *op) |
{ |
uint32_t *binding_table; |
uint16_t offset; |
bool dirty = FALSE;
gen6_get_batch(sna); |
binding_table = gen6_composite_get_binding_table(sna, &offset); |
binding_table[0] = |
gen6_bind_bo(sna, |
op->dst.bo, op->dst.width, op->dst.height, |
GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, |
TRUE); |
binding_table[1] = |
gen6_bind_bo(sna, |
op->src.bo, op->src.width, op->src.height, |
GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, |
FALSE); |
if (sna->kgem.surface == offset && |
*(uint64_t *)(sna->kgem.batch + sna->render_state.gen6.surface_table) == *(uint64_t*)binding_table) { |
sna->kgem.surface += sizeof(struct gen6_surface_state_padded) / sizeof(uint32_t); |
offset = sna->render_state.gen6.surface_table; |
} |
gen6_emit_state(sna, op, offset | dirty); |
} |
static void |
gen6_render_copy_blt(struct sna *sna, |
const struct sna_composite_op *op, |
int16_t sx, int16_t sy, |
int16_t w, int16_t h, |
int16_t dx, int16_t dy) |
{ |
if (unlikely(!gen6_get_rectangles(sna, op, 1))) { |
// _kgem_submit(&sna->kgem); |
gen6_emit_copy_state(sna, op); |
gen6_get_rectangles(sna, op, 1); |
} |
OUT_VERTEX(dx+w, dy+h); |
OUT_VERTEX_F((sx+w)*op->src.scale[0]); |
OUT_VERTEX_F((sy+h)*op->src.scale[1]); |
OUT_VERTEX(dx, dy+h); |
OUT_VERTEX_F(sx*op->src.scale[0]); |
OUT_VERTEX_F((sy+h)*op->src.scale[1]); |
OUT_VERTEX(dx, dy); |
OUT_VERTEX_F(sx*op->src.scale[0]); |
OUT_VERTEX_F(sy*op->src.scale[1]); |
} |
static void |
gen6_render_copy_done(struct sna *sna) |
{ |
DBG(("%s()\n", __FUNCTION__)); |
if (sna->render_state.gen6.vertex_offset) |
gen6_vertex_flush(sna); |
} |
static Bool |
gen6_render_copy(struct sna *sna, uint8_t alu, |
bitmap_t *src, struct kgem_bo *src_bo, |
bitmap_t *dst, struct kgem_bo *dst_bo, |
int dst_x, int dst_y, int src_x, int src_y, int w, int h) |
{ |
struct sna_composite_op op; |
memset(&op, 0, sizeof(op)); |
DBG(("%s (alu=%d, src=(%dx%d), dst=(%dx%d))\n", |
__FUNCTION__, alu, |
src->width, src->height, |
dst->width, dst->height)); |
// printf("%s %dx%d src=(%dx%d), dst=(%dx%d)\n", |
// __FUNCTION__,dst_x, dst_y, |
// src->width, src->height, |
// dst->width, dst->height); |
op.dst.format = 0; |
op.src.pict_format = 0; |
op.op = PictOpSrc; |
op.dst.pixmap = dst; |
op.dst.width = dst->width; |
op.dst.height = dst->height; |
op.dst.bo = dst_bo; |
op.src.bo = src_bo; |
op.src.card_format = GEN6_SURFACEFORMAT_B8G8R8X8_UNORM; |
op.src.width = src->width; |
op.src.height = src->height; |
op.src.scale[0] = 1.f/w; //src->width; |
op.src.scale[1] = 1.f/h; //src->height; |
op.src.filter = SAMPLER_FILTER_BILINEAR; |
op.src.repeat = SAMPLER_EXTEND_NONE; |
op.mask.bo = NULL; |
op.is_affine = true; |
op.floats_per_vertex = 3; |
op.floats_per_rect = 9; |
op.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK; |
op.u.gen6.nr_surfaces = 2; |
op.u.gen6.nr_inputs = 1; |
op.u.gen6.ve_id = 1; |
gen6_emit_copy_state(sna, &op); |
gen6_align_vertex(sna, &op); |
gen6_render_copy_blt(sna, &op, src_x, src_y, w, h, dst_x, dst_y); |
gen6_render_copy_done(sna); |
return TRUE; |
} |
static void |
gen6_emit_fill_state(struct sna *sna, const struct sna_composite_op *op) |
{ |
uint32_t *binding_table; |
uint16_t offset; |
bool dirty = FALSE;
gen6_get_batch(sna); |
// dirty = kgem_bo_is_dirty(op->dst.bo); |
binding_table = gen6_composite_get_binding_table(sna, &offset); |
binding_table[0] = |
gen6_bind_bo(sna, |
op->dst.bo, 1024, 768, |
GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, |
TRUE); |
binding_table[1] = |
gen6_bind_bo(sna, |
op->src.bo, 1, 1, |
GEN6_SURFACEFORMAT_B8G8R8A8_UNORM, |
FALSE); |
if (sna->kgem.surface == offset && |
*(uint64_t *)(sna->kgem.batch + sna->render_state.gen6.surface_table) == *(uint64_t*)binding_table) { |
sna->kgem.surface += |
sizeof(struct gen6_surface_state_padded)/sizeof(uint32_t); |
offset = sna->render_state.gen6.surface_table; |
} |
gen6_emit_state(sna, op, offset | dirty); |
} |
static Bool |
gen6_render_clear(struct sna *sna, bitmap_t *dst, struct kgem_bo *bo) |
{ |
struct sna_composite_op tmp; |
DBG(("%s: %dx%d\n", |
__FUNCTION__, |
dst->width, |
dst->height)); |
tmp.op = PictOpSrc; |
tmp.dst.pixmap = dst; |
tmp.dst.width = dst->width; |
tmp.dst.height = dst->height; |
tmp.dst.format = 0; //PICT_a8r8g8b8; |
tmp.dst.bo = bo; |
tmp.dst.x = tmp.dst.y = 0; |
// tmp.src.bo = sna_render_get_solid(sna, 0); |
tmp.src.bo = bo; |
tmp.src.filter = SAMPLER_FILTER_NEAREST; |
tmp.src.repeat = SAMPLER_EXTEND_REPEAT; |
tmp.mask.bo = NULL; |
tmp.mask.filter = SAMPLER_FILTER_NEAREST; |
tmp.mask.repeat = SAMPLER_EXTEND_NONE; |
tmp.is_affine = TRUE; |
tmp.floats_per_vertex = 3; |
tmp.floats_per_rect = 9; |
tmp.has_component_alpha = 0; |
tmp.need_magic_ca_pass = FALSE; |
tmp.u.gen6.wm_kernel = GEN6_WM_KERNEL_NOMASK; |
tmp.u.gen6.nr_surfaces = 2; |
tmp.u.gen6.nr_inputs = 1; |
tmp.u.gen6.ve_id = 1; |
// if (!kgem_check_bo(&sna->kgem, bo, NULL)) { |
// _kgem_submit(&sna->kgem); |
// assert(kgem_check_bo(&sna->kgem, bo, NULL)); |
// } |
gen6_emit_fill_state(sna, &tmp); |
gen6_align_vertex(sna, &tmp); |
if (unlikely(!gen6_get_rectangles(sna, &tmp, 1))) { |
// _kgem_submit(&sna->kgem); |
gen6_emit_fill_state(sna, &tmp); |
gen6_get_rectangles(sna, &tmp, 1); |
} |
OUT_VERTEX(dst->width, dst->height); |
OUT_VERTEX_F(1); |
OUT_VERTEX_F(1); |
OUT_VERTEX(0, dst->height); |
OUT_VERTEX_F(0); |
OUT_VERTEX_F(1); |
OUT_VERTEX(0, 0); |
OUT_VERTEX_F(0); |
OUT_VERTEX_F(0); |
gen6_vertex_flush(sna); |
// kgem_bo_destroy(&sna->kgem, tmp.src.bo); |
// gen6_render_composite_done(sna, &tmp); |
// _kgem_submit(&sna->kgem); |
return TRUE; |
} |
static void gen6_render_flush(struct sna *sna) |
{ |
gen6_vertex_close(sna); |
} |
static void |
gen6_render_retire(struct kgem *kgem) |
{ |
if (kgem->ring && (kgem->has_semaphores || !kgem->need_retire)) |
kgem->ring = kgem->mode; |
} |
static void gen6_render_reset(struct sna *sna) |
{ |
sna->render_state.gen6.needs_invariant = TRUE; |
sna->render_state.gen6.vb_id = 0; |
sna->render_state.gen6.ve_id = -1; |
sna->render_state.gen6.last_primitive = -1; |
sna->render_state.gen6.num_sf_outputs = 0; |
sna->render_state.gen6.samplers = -1; |
sna->render_state.gen6.blend = -1; |
sna->render_state.gen6.kernel = -1; |
sna->render_state.gen6.drawrect_offset = -1; |
sna->render_state.gen6.drawrect_limit = -1; |
sna->render_state.gen6.surface_table = -1; |
} |
static void gen6_render_fini(struct sna *sna) |
{ |
// kgem_bo_destroy(&sna->kgem, sna->render_state.gen6.general_bo); |
} |
static Bool gen6_render_setup(struct sna *sna) |
{ |
struct gen6_render_state *state = &sna->render_state.gen6; |
struct sna_static_stream general; |
struct gen6_sampler_state *ss; |
int i, j, k, l, m; |
sna_static_stream_init(&general); |
/* Zero pad the start. If you see an offset of 0x0 in the batchbuffer |
* dumps, you know it points to zero. |
*/ |
null_create(&general); |
scratch_create(&general); |
for (m = 0; m < GEN6_KERNEL_COUNT; m++) |
state->wm_kernel[m] = |
sna_static_stream_add(&general, |
wm_kernels[m].data, |
wm_kernels[m].size, |
64); |
ss = sna_static_stream_map(&general, |
2 * sizeof(*ss) * |
FILTER_COUNT * EXTEND_COUNT * |
FILTER_COUNT * EXTEND_COUNT, |
32); |
state->wm_state = sna_static_stream_offsetof(&general, ss); |
for (i = 0; i < FILTER_COUNT; i++) { |
for (j = 0; j < EXTEND_COUNT; j++) { |
for (k = 0; k < FILTER_COUNT; k++) { |
for (l = 0; l < EXTEND_COUNT; l++) { |
sampler_state_init(ss++, i, j); |
sampler_state_init(ss++, k, l); |
} |
} |
} |
} |
state->cc_vp = gen6_create_cc_viewport(&general); |
state->cc_blend = gen6_composite_create_blend_state(&general); |
state->general_bo = sna_static_stream_fini(sna, &general); |
return state->general_bo != NULL; |
} |
Bool gen6_render_init(struct sna *sna) |
{ |
if (!gen6_render_setup(sna)) |
return FALSE; |
// sna->kgem.context_switch = gen6_render_context_switch; |
sna->kgem.retire = gen6_render_retire; |
sna->render.composite = gen6_render_composite; |
// sna->render.video = gen6_render_video; |
// sna->render.copy_boxes = gen6_render_copy_boxes; |
sna->render.copy = gen6_render_copy; |
// sna->render.fill_boxes = gen6_render_fill_boxes; |
// sna->render.fill = gen6_render_fill; |
// sna->render.fill_one = gen6_render_fill_one; |
sna->render.clear = gen6_render_clear; |
sna->render.flush = gen6_render_flush; |
sna->render.reset = gen6_render_reset; |
// sna->render.fini = gen6_render_fini; |
sna->render.max_3d_size = GEN6_MAX_SIZE; |
sna->render.max_3d_pitch = 1 << 18; |
return TRUE; |
} |
/drivers/video/drm/i915/sna/sna.c |
---|
0,0 → 1,387 |
#include <drmP.h> |
#include <drm.h> |
#include "i915_drm.h" |
#include "i915_drv.h" |
#include "intel_drv.h" |
#include <linux/kernel.h> |
#include <linux/module.h> |
#include <errno-base.h> |
#include <memory.h> |
#include <syscall.h> |
#include "../bitmap.h" |
#include "sna.h" |
struct kgem_bo *create_bo(bitmap_t *bitmap); |
static Bool sna_solid_cache_init(struct sna *sna); |
struct sna *sna_device; |
void no_render_init(struct sna *sna) |
{ |
struct sna_render *render = &sna->render; |
memset (render,0, sizeof (*render)); |
render->vertices = render->vertex_data; |
render->vertex_size = ARRAY_SIZE(render->vertex_data); |
// render->composite = no_render_composite; |
// render->copy_boxes = no_render_copy_boxes; |
// render->copy = no_render_copy; |
// render->fill_boxes = no_render_fill_boxes; |
// render->fill = no_render_fill; |
// render->fill_one = no_render_fill_one; |
// render->clear = no_render_clear; |
// render->reset = no_render_reset; |
// render->flush = no_render_flush; |
// render->fini = no_render_fini; |
// sna->kgem.context_switch = no_render_context_switch; |
// sna->kgem.retire = no_render_retire; |
// if (sna->kgem.gen >= 60) |
sna->kgem.ring = KGEM_RENDER; |
} |
Bool sna_accel_init(struct sna *sna) |
{ |
const char *backend; |
// list_init(&sna->deferred_free); |
// list_init(&sna->dirty_pixmaps); |
// list_init(&sna->active_pixmaps); |
// list_init(&sna->inactive_clock[0]); |
// list_init(&sna->inactive_clock[1]); |
// sna_accel_install_timers(sna); |
backend = "no"; |
sna->have_render = false; |
sna->default_tiling = 0; //I915_TILING_X; |
no_render_init(sna); |
if ((sna->have_render = gen6_render_init(sna))) |
backend = "SandyBridge"; |
/* |
if (sna->chipset.info->gen >= 80) { |
} else if (sna->chipset.info->gen >= 70) { |
if ((sna->have_render = gen7_render_init(sna))) |
backend = "IvyBridge"; |
} else if (sna->chipset.info->gen >= 60) { |
if ((sna->have_render = gen6_render_init(sna))) |
backend = "SandyBridge"; |
} else if (sna->chipset.info->gen >= 50) { |
if ((sna->have_render = gen5_render_init(sna))) |
backend = "Ironlake"; |
} else if (sna->chipset.info->gen >= 40) { |
if ((sna->have_render = gen4_render_init(sna))) |
backend = "Broadwater"; |
} else if (sna->chipset.info->gen >= 30) { |
if ((sna->have_render = gen3_render_init(sna))) |
backend = "gen3"; |
} else if (sna->chipset.info->gen >= 20) { |
if ((sna->have_render = gen2_render_init(sna))) |
backend = "gen2"; |
} |
*/ |
DBG(("%s(backend=%s, have_render=%d)\n", |
__FUNCTION__, backend, sna->have_render)); |
kgem_reset(&sna->kgem); |
if (!sna_solid_cache_init(sna)) |
return FALSE; |
sna_device = sna; |
#if 0 |
{ |
struct kgem_bo *screen_bo; |
bitmap_t screen; |
screen.pitch = 1024*4; |
screen.gaddr = 0; |
screen.width = 1024; |
screen.height = 768; |
screen.obj = (void*)-1; |
screen_bo = create_bo(&screen); |
sna->render.clear(sna, &screen, screen_bo); |
} |
#endif |
return TRUE; |
} |
int sna_init() |
{ |
struct sna *sna; |
DBG(("%s\n", __FUNCTION__)); |
sna = kzalloc(sizeof(struct sna), 0); |
if (sna == NULL) |
return FALSE; |
// sna->mode.cpp = 4; |
kgem_init(&sna->kgem, 60); |
/* |
if (!xf86ReturnOptValBool(sna->Options, |
OPTION_RELAXED_FENCING, |
sna->kgem.has_relaxed_fencing)) { |
xf86DrvMsg(scrn->scrnIndex, |
sna->kgem.has_relaxed_fencing ? X_CONFIG : X_PROBED, |
"Disabling use of relaxed fencing\n"); |
sna->kgem.has_relaxed_fencing = 0; |
} |
if (!xf86ReturnOptValBool(sna->Options, |
OPTION_VMAP, |
sna->kgem.has_vmap)) { |
xf86DrvMsg(scrn->scrnIndex, |
sna->kgem.has_vmap ? X_CONFIG : X_PROBED, |
"Disabling use of vmap\n"); |
sna->kgem.has_vmap = 0; |
} |
*/ |
/* Disable tiling by default */ |
sna->tiling = SNA_TILING_DISABLE; |
/* Default fail-safe value of 75 Hz */ |
// sna->vblank_interval = 1000 * 1000 * 1000 / 75; |
sna->flags = 0; |
sna->flags |= SNA_NO_THROTTLE; |
sna->flags |= SNA_NO_DELAYED_FLUSH; |
return sna_accel_init(sna); |
} |
static Bool sna_solid_cache_init(struct sna *sna) |
{ |
struct sna_solid_cache *cache = &sna->render.solid_cache; |
DBG(("%s\n", __FUNCTION__)); |
cache->cache_bo = |
kgem_create_linear(&sna->kgem, sizeof(cache->color)); |
if (!cache->cache_bo) |
return FALSE; |
/* |
* Initialise [0] with white since it is very common and filling the |
* zeroth slot simplifies some of the checks. |
*/ |
cache->color[0] = 0xffffffff; |
cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t)); |
cache->bo[0]->pitch = 4; |
cache->dirty = 1; |
cache->size = 1; |
cache->last = 0; |
return TRUE; |
} |
void |
sna_render_flush_solid(struct sna *sna) |
{ |
struct sna_solid_cache *cache = &sna->render.solid_cache; |
DBG(("sna_render_flush_solid(size=%d)\n", cache->size)); |
assert(cache->dirty); |
assert(cache->size); |
kgem_bo_write(&sna->kgem, cache->cache_bo, |
cache->color, cache->size*sizeof(uint32_t)); |
cache->dirty = 0; |
cache->last = 0; |
} |
static void |
sna_render_finish_solid(struct sna *sna, bool force) |
{ |
struct sna_solid_cache *cache = &sna->render.solid_cache; |
int i; |
DBG(("sna_render_finish_solid(force=%d, domain=%d, busy=%d, dirty=%d)\n", |
force, cache->cache_bo->domain, cache->cache_bo->rq != NULL, cache->dirty)); |
if (!force && cache->cache_bo->domain != DOMAIN_GPU) |
return; |
if (cache->dirty) |
sna_render_flush_solid(sna); |
for (i = 0; i < cache->size; i++) { |
if (cache->bo[i] == NULL) |
continue; |
kgem_bo_destroy(&sna->kgem, cache->bo[i]); |
cache->bo[i] = NULL; |
} |
kgem_bo_destroy(&sna->kgem, cache->cache_bo); |
DBG(("sna_render_finish_solid reset\n")); |
cache->cache_bo = kgem_create_linear(&sna->kgem, sizeof(cache->color)); |
cache->bo[0] = kgem_create_proxy(cache->cache_bo, 0, sizeof(uint32_t)); |
cache->bo[0]->pitch = 4; |
if (force) |
cache->size = 1; |
} |
struct kgem_bo * |
sna_render_get_solid(struct sna *sna, uint32_t color) |
{ |
struct sna_solid_cache *cache = &sna->render.solid_cache; |
int i; |
DBG(("%s: %08x\n", __FUNCTION__, color)); |
// if ((color & 0xffffff) == 0) /* alpha only */ |
// return kgem_bo_reference(sna->render.alpha_cache.bo[color>>24]); |
if (color == 0xffffffff) { |
DBG(("%s(white)\n", __FUNCTION__)); |
return kgem_bo_reference(cache->bo[0]); |
} |
if (cache->color[cache->last] == color) { |
DBG(("sna_render_get_solid(%d) = %x (last)\n", |
cache->last, color)); |
return kgem_bo_reference(cache->bo[cache->last]); |
} |
for (i = 1; i < cache->size; i++) { |
if (cache->color[i] == color) { |
if (cache->bo[i] == NULL) { |
DBG(("sna_render_get_solid(%d) = %x (recreate)\n", |
i, color)); |
goto create; |
} else { |
DBG(("sna_render_get_solid(%d) = %x (old)\n", |
i, color)); |
goto done; |
} |
} |
} |
sna_render_finish_solid(sna, i == ARRAY_SIZE(cache->color)); |
i = cache->size++; |
cache->color[i] = color; |
cache->dirty = 1; |
DBG(("sna_render_get_solid(%d) = %x (new)\n", i, color)); |
create: |
cache->bo[i] = kgem_create_proxy(cache->cache_bo, |
i*sizeof(uint32_t), sizeof(uint32_t)); |
cache->bo[i]->pitch = 4; |
done: |
cache->last = i; |
return kgem_bo_reference(cache->bo[i]); |
} |
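/* Usage sketch (illustrative only; 0xff0000ff is an arbitrary color):
 * entries are reference counted, so a caller pairs every lookup with
 * kgem_bo_destroy() once the 1x1 solid source is no longer needed. */
#if 0
{
struct kgem_bo *solid = sna_render_get_solid(sna, 0xff0000ff);
/* ... bind as a repeating 1x1 source ... */
kgem_bo_destroy(&sna->kgem, solid);
}
#endif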
int sna_blit_copy(bitmap_t *dst_bitmap, int dst_x, int dst_y, |
int w, int h, bitmap_t *src_bitmap, int src_x, int src_y) |
{ |
batchbuffer_t execbuffer; |
struct kgem_bo src_bo, dst_bo; |
memset(&execbuffer, 0, sizeof(execbuffer)); |
memset(&src_bo, 0, sizeof(src_bo)); |
memset(&dst_bo, 0, sizeof(dst_bo)); |
INIT_LIST_HEAD(&execbuffer.objects); |
src_bo.gaddr = src_bitmap->gaddr; |
src_bo.pitch = src_bitmap->pitch; |
src_bo.tiling = 0; |
dst_bo.gaddr = dst_bitmap->gaddr; |
dst_bo.pitch = dst_bitmap->pitch; |
dst_bo.tiling = 0; |
sna_device->render.copy(sna_device, 0, src_bitmap, &src_bo, |
dst_bitmap, &dst_bo, dst_x, dst_y, |
src_x, src_y, w, h); |
list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects);
_kgem_submit(&sna_device->kgem, &execbuffer);
return 0;
}
int sna_blit_tex(bitmap_t *dst_bitmap, int dst_x, int dst_y, |
int w, int h, bitmap_t *src_bitmap, int src_x, int src_y, |
bitmap_t *mask_bitmap) |
{ |
struct sna_composite_op cop; |
batchbuffer_t execbuffer; |
BoxRec box; |
struct kgem_bo src_bo, mask_bo, dst_bo; |
memset(&cop, 0, sizeof(cop)); |
memset(&execbuffer, 0, sizeof(execbuffer)); |
memset(&src_bo, 0, sizeof(src_bo)); |
memset(&dst_bo, 0, sizeof(dst_bo)); |
memset(&mask_bo, 0, sizeof(mask_bo)); |
src_bo.gaddr = src_bitmap->gaddr; |
src_bo.pitch = src_bitmap->pitch; |
src_bo.tiling = 0; |
dst_bo.gaddr = dst_bitmap->gaddr; |
dst_bo.pitch = dst_bitmap->pitch; |
dst_bo.tiling = 0; |
mask_bo.gaddr = mask_bitmap->gaddr; |
mask_bo.pitch = mask_bitmap->pitch; |
mask_bo.tiling = 0; |
box.x1 = dst_x; |
box.y1 = dst_y; |
box.x2 = dst_x+w; |
box.y2 = dst_y+h; |
sna_device->render.composite(sna_device, 0, |
src_bitmap, &src_bo, |
mask_bitmap, &mask_bo, |
dst_bitmap, &dst_bo, |
src_x, src_y, |
src_x, src_y, |
dst_x, dst_y, |
w, h, &cop); |
cop.box(sna_device, &cop, &box); |
cop.done(sna_device, &cop); |
INIT_LIST_HEAD(&execbuffer.objects); |
list_add_tail(&src_bitmap->obj->exec_list, &execbuffer.objects); |
list_add_tail(&mask_bitmap->obj->exec_list, &execbuffer.objects); |
_kgem_submit(&sna_device->kgem, &execbuffer);
return 0;
}
/drivers/video/drm/i915/sna/sna_render.h |
---|
0,0 → 1,478 |
#ifndef SNA_RENDER_H |
#define SNA_RENDER_H |
typedef int Bool; |
#define GRADIENT_CACHE_SIZE 16 |
struct sna; |
struct sna_composite_rectangles { |
struct sna_coordinate { |
int16_t x, y; |
} src, mask, dst; |
int16_t width, height; |
}; |
struct sna_composite_op { |
fastcall void (*blt)(struct sna *sna, const struct sna_composite_op *op, |
const struct sna_composite_rectangles *r); |
fastcall void (*box)(struct sna *sna, |
const struct sna_composite_op *op, |
const BoxRec *box); |
void (*boxes)(struct sna *sna, const struct sna_composite_op *op, |
const BoxRec *box, int nbox); |
void (*done)(struct sna *sna, const struct sna_composite_op *op); |
struct sna_damage **damage; |
uint32_t op; |
struct { |
bitmap_t *pixmap; |
CARD32 format; |
struct kgem_bo *bo; |
int16_t x, y; |
uint16_t width, height; |
} dst; |
struct sna_composite_channel { |
struct kgem_bo *bo; |
// PictTransform *transform; |
uint16_t width; |
uint16_t height; |
uint32_t pict_format; |
uint32_t card_format; |
uint32_t filter; |
uint32_t repeat; |
uint32_t is_affine : 1; |
uint32_t is_solid : 1; |
uint32_t is_linear : 1; |
uint32_t is_opaque : 1; |
uint32_t alpha_fixup : 1; |
uint32_t rb_reversed : 1; |
int16_t offset[2]; |
float scale[2]; |
// pixman_transform_t embedded_transform; |
union { |
struct { |
uint32_t pixel; |
float linear_dx; |
float linear_dy; |
float linear_offset; |
} gen2; |
struct gen3_shader_channel { |
int type; |
uint32_t mode; |
uint32_t constants; |
} gen3; |
} u; |
} src, mask; |
uint32_t is_affine : 1; |
uint32_t has_component_alpha : 1; |
uint32_t need_magic_ca_pass : 1; |
uint32_t rb_reversed : 1; |
int16_t floats_per_vertex; |
int16_t floats_per_rect; |
fastcall void (*prim_emit)(struct sna *sna, |
const struct sna_composite_op *op, |
const struct sna_composite_rectangles *r); |
struct sna_composite_redirect { |
struct kgem_bo *real_bo; |
struct sna_damage **real_damage, *damage; |
BoxRec box; |
} redirect; |
union { |
struct sna_blt_state { |
bitmap_t *src_pixmap; |
int16_t sx, sy; |
uint32_t inplace :1; |
uint32_t overwrites:1; |
uint32_t bpp : 6; |
uint32_t cmd; |
uint32_t br13; |
uint32_t pitch[2]; |
uint32_t pixel; |
struct kgem_bo *bo[2]; |
} blt; |
struct { |
float constants[8]; |
uint32_t num_constants; |
} gen3; |
struct { |
int wm_kernel; |
int ve_id; |
} gen4; |
struct { |
int wm_kernel; |
int ve_id; |
} gen5; |
struct { |
int wm_kernel; |
int nr_surfaces; |
int nr_inputs; |
int ve_id; |
} gen6; |
struct { |
int wm_kernel; |
int nr_surfaces; |
int nr_inputs; |
int ve_id; |
} gen7; |
void *priv; |
} u; |
}; |
struct sna_render { |
int max_3d_size; |
int max_3d_pitch; |
Bool (*composite)(struct sna *sna, uint8_t op, |
bitmap_t *src, struct kgem_bo *src_bo, |
bitmap_t *mask, struct kgem_bo *mask_bo, |
bitmap_t *dst, struct kgem_bo *dst_bo, |
int16_t src_x, int16_t src_y, |
int16_t msk_x, int16_t msk_y, |
int16_t dst_x, int16_t dst_y, |
int16_t w, int16_t h, |
struct sna_composite_op *tmp); |
/* |
Bool (*composite_spans)(struct sna *sna, uint8_t op, |
PicturePtr dst, PicturePtr src, |
int16_t src_x, int16_t src_y, |
int16_t dst_x, int16_t dst_y, |
int16_t w, int16_t h, |
unsigned flags, |
struct sna_composite_spans_op *tmp); |
#define COMPOSITE_SPANS_RECTILINEAR 0x1 |
Bool (*video)(struct sna *sna, |
struct sna_video *video, |
struct sna_video_frame *frame, |
RegionPtr dstRegion, |
short src_w, short src_h, |
short drw_w, short drw_h, |
PixmapPtr pixmap); |
Bool (*fill_boxes)(struct sna *sna, |
CARD8 op, |
PictFormat format, |
const xRenderColor *color, |
PixmapPtr dst, struct kgem_bo *dst_bo, |
const BoxRec *box, int n); |
Bool (*fill)(struct sna *sna, uint8_t alu, |
PixmapPtr dst, struct kgem_bo *dst_bo, |
uint32_t color, |
struct sna_fill_op *tmp); |
Bool (*fill_one)(struct sna *sna, PixmapPtr dst, struct kgem_bo *dst_bo, |
uint32_t color, |
int16_t x1, int16_t y1, int16_t x2, int16_t y2, |
uint8_t alu); |
*/ |
Bool (*clear)(struct sna *sna, bitmap_t *dst, struct kgem_bo *dst_bo); |
/* |
Bool (*copy_boxes)(struct sna *sna, uint8_t alu, |
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy, |
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy, |
const BoxRec *box, int n); |
*/ |
Bool (*copy)(struct sna *sna, uint8_t alu, |
bitmap_t *src, struct kgem_bo *src_bo, |
bitmap_t *dst, struct kgem_bo *dst_bo, |
int dst_x, int dst_y, int src_x, int src_y, |
int w, int h); |
void (*flush)(struct sna *sna); |
void (*reset)(struct sna *sna); |
void (*fini)(struct sna *sna); |
// struct sna_alpha_cache { |
// struct kgem_bo *cache_bo; |
// struct kgem_bo *bo[256]; |
// } alpha_cache; |
struct sna_solid_cache { |
struct kgem_bo *cache_bo; |
uint32_t color[1024]; |
struct kgem_bo *bo[1024]; |
int last; |
int size; |
int dirty; |
} solid_cache; |
// struct { |
// struct sna_gradient_cache { |
// struct kgem_bo *bo; |
// int nstops; |
// PictGradientStop *stops; |
// } cache[GRADIENT_CACHE_SIZE]; |
// int size; |
// } gradient_cache; |
// struct sna_glyph_cache{ |
// PicturePtr picture; |
// struct sna_glyph **glyphs; |
// uint16_t count; |
// uint16_t evict; |
// } glyph[2]; |
uint16_t vertex_start; |
uint16_t vertex_index; |
uint16_t vertex_used; |
uint16_t vertex_size; |
uint16_t vertex_reloc[8]; |
struct kgem_bo *vbo; |
float *vertices; |
float vertex_data[1024]; |
}; |
enum { |
GEN6_WM_KERNEL_NOMASK = 0, |
GEN6_WM_KERNEL_MASK, |
GEN6_KERNEL_COUNT |
}; |
struct gen6_render_state { |
struct kgem_bo *general_bo; |
uint32_t vs_state; |
uint32_t sf_state; |
uint32_t sf_mask_state; |
uint32_t wm_state; |
uint32_t wm_kernel[GEN6_KERNEL_COUNT]; |
uint32_t cc_vp; |
uint32_t cc_blend; |
uint32_t drawrect_offset; |
uint32_t drawrect_limit; |
uint32_t blend; |
uint32_t samplers; |
uint32_t kernel; |
uint16_t num_sf_outputs; |
uint16_t vb_id; |
uint16_t ve_id; |
uint16_t vertex_offset; |
uint16_t last_primitive; |
int16_t floats_per_vertex; |
uint16_t surface_table; |
Bool needs_invariant; |
}; |
struct sna_static_stream { |
uint32_t size, used; |
uint8_t *data; |
}; |
int sna_static_stream_init(struct sna_static_stream *stream); |
uint32_t sna_static_stream_add(struct sna_static_stream *stream, |
const void *data, uint32_t len, uint32_t align); |
void *sna_static_stream_map(struct sna_static_stream *stream, |
uint32_t len, uint32_t align); |
uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream, |
void *ptr); |
struct kgem_bo *sna_static_stream_fini(struct sna *sna, |
struct sna_static_stream *stream); |
/* |
struct kgem_bo * |
sna_render_get_solid(struct sna *sna, |
uint32_t color); |
void |
sna_render_flush_solid(struct sna *sna); |
struct kgem_bo * |
sna_render_get_gradient(struct sna *sna, |
PictGradient *pattern); |
uint32_t sna_rgba_for_color(uint32_t color, int depth); |
Bool sna_picture_is_solid(PicturePtr picture, uint32_t *color); |
*/ |
void no_render_init(struct sna *sna); |
Bool gen2_render_init(struct sna *sna); |
Bool gen3_render_init(struct sna *sna); |
Bool gen4_render_init(struct sna *sna); |
Bool gen5_render_init(struct sna *sna); |
Bool gen6_render_init(struct sna *sna); |
Bool gen7_render_init(struct sna *sna); |
/* |
Bool sna_tiling_composite(uint32_t op, |
PicturePtr src, |
PicturePtr mask, |
PicturePtr dst, |
int16_t src_x, int16_t src_y, |
int16_t mask_x, int16_t mask_y, |
int16_t dst_x, int16_t dst_y, |
int16_t width, int16_t height, |
struct sna_composite_op *tmp); |
Bool sna_tiling_fill_boxes(struct sna *sna, |
CARD8 op, |
PictFormat format, |
const xRenderColor *color, |
PixmapPtr dst, struct kgem_bo *dst_bo, |
const BoxRec *box, int n); |
Bool sna_tiling_copy_boxes(struct sna *sna, uint8_t alu, |
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy, |
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy, |
const BoxRec *box, int n); |
Bool sna_tiling_blt_copy_boxes(struct sna *sna, uint8_t alu, |
struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy, |
struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy, |
int bpp, const BoxRec *box, int nbox); |
Bool sna_blt_composite(struct sna *sna, |
uint32_t op, |
PicturePtr src, |
PicturePtr dst, |
int16_t src_x, int16_t src_y, |
int16_t dst_x, int16_t dst_y, |
int16_t width, int16_t height, |
struct sna_composite_op *tmp); |
bool sna_blt_fill(struct sna *sna, uint8_t alu, |
struct kgem_bo *bo, |
int bpp, |
uint32_t pixel, |
struct sna_fill_op *fill); |
bool sna_blt_copy(struct sna *sna, uint8_t alu, |
struct kgem_bo *src, |
struct kgem_bo *dst, |
int bpp, |
struct sna_copy_op *copy); |
Bool sna_blt_fill_boxes(struct sna *sna, uint8_t alu, |
struct kgem_bo *bo, |
int bpp, |
uint32_t pixel, |
const BoxRec *box, int n); |
Bool sna_blt_copy_boxes(struct sna *sna, uint8_t alu, |
struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy, |
struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy, |
int bpp, |
const BoxRec *box, int n); |
Bool sna_blt_copy_boxes_fallback(struct sna *sna, uint8_t alu, |
PixmapPtr src, struct kgem_bo *src_bo, int16_t src_dx, int16_t src_dy, |
PixmapPtr dst, struct kgem_bo *dst_bo, int16_t dst_dx, int16_t dst_dy, |
const BoxRec *box, int nbox); |
Bool _sna_get_pixel_from_rgba(uint32_t *pixel, |
uint16_t red, |
uint16_t green, |
uint16_t blue, |
uint16_t alpha, |
uint32_t format); |
static inline Bool |
sna_get_pixel_from_rgba(uint32_t * pixel, |
uint16_t red, |
uint16_t green, |
uint16_t blue, |
uint16_t alpha, |
uint32_t format) |
{ |
switch (format) { |
case PICT_x8r8g8b8: |
alpha = 0xffff; |
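/* fall through: x8r8g8b8 is treated as fully opaque a8r8g8b8 */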
case PICT_a8r8g8b8: |
*pixel = ((alpha >> 8 << 24) | |
(red >> 8 << 16) | |
(green & 0xff00) | |
(blue >> 8)); |
return TRUE; |
case PICT_a8: |
*pixel = alpha >> 8; |
return TRUE; |
} |
return _sna_get_pixel_from_rgba(pixel, red, green, blue, alpha, format); |
} |
int |
sna_render_pixmap_bo(struct sna *sna, |
struct sna_composite_channel *channel, |
PixmapPtr pixmap, |
int16_t x, int16_t y, |
int16_t w, int16_t h, |
int16_t dst_x, int16_t dst_y); |
bool |
sna_render_pixmap_partial(struct sna *sna, |
PixmapPtr pixmap, |
struct kgem_bo *bo, |
struct sna_composite_channel *channel, |
int16_t x, int16_t y, |
int16_t w, int16_t h); |
int |
sna_render_picture_extract(struct sna *sna, |
PicturePtr picture, |
struct sna_composite_channel *channel, |
int16_t x, int16_t y, |
int16_t w, int16_t h, |
int16_t dst_x, int16_t dst_y); |
int |
sna_render_picture_fixup(struct sna *sna, |
PicturePtr picture, |
struct sna_composite_channel *channel, |
int16_t x, int16_t y, |
int16_t w, int16_t h, |
int16_t dst_x, int16_t dst_y); |
int |
sna_render_picture_convert(struct sna *sna, |
PicturePtr picture, |
struct sna_composite_channel *channel, |
PixmapPtr pixmap, |
int16_t x, int16_t y, |
int16_t w, int16_t h, |
int16_t dst_x, int16_t dst_y); |
inline static void sna_render_composite_redirect_init(struct sna_composite_op *op) |
{ |
struct sna_composite_redirect *t = &op->redirect; |
t->real_bo = NULL; |
t->damage = NULL; |
} |
Bool |
sna_render_composite_redirect(struct sna *sna, |
struct sna_composite_op *op, |
int x, int y, int width, int height); |
void |
sna_render_composite_redirect_done(struct sna *sna, |
const struct sna_composite_op *op); |
bool |
sna_composite_mask_is_opaque(PicturePtr mask); |
*/ |
#endif /* SNA_RENDER_H */ |
/drivers/video/drm/i915/sna/kgem.c |
---|
0,0 → 1,1718 |
/* |
* Copyright (c) 2011 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
* SOFTWARE. |
* |
* Authors: |
* Chris Wilson <chris@chris-wilson.co.uk> |
* |
*/ |
#ifdef HAVE_CONFIG_H |
#include "config.h" |
#endif |
#include <drmP.h> |
#include <drm.h> |
#include "i915_drm.h" |
#include "i915_drv.h" |
#include "intel_drv.h" |
#include <linux/kernel.h> |
#include "../bitmap.h" |
#include "sna.h" |
//#include "sna_reg.h" |
//#include <time.h> |
//#include <errno.h> |
#define NO_CACHE 1 |
#define list_is_empty list_empty |
#define list_init INIT_LIST_HEAD |
extern struct drm_device *main_device; |
static struct kgem_bo * |
search_linear_cache(struct kgem *kgem, unsigned int num_pages, |
unsigned flags); |
#define INT16_MAX (32767) |
#define PAGE_ALIGN(x) ALIGN(x, PAGE_SIZE) |
#define NUM_PAGES(x) (((x) + PAGE_SIZE-1) / PAGE_SIZE) |
#define MAX_GTT_VMA_CACHE 512 |
#define MAX_CPU_VMA_CACHE INT16_MAX |
#define MAP_PRESERVE_TIME 10 |
#define CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) & ~1)) |
#define MAKE_CPU_MAP(ptr) ((void*)((uintptr_t)(ptr) | 1)) |
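/* Annotation: the map type is tagged in the pointer's low bit -- safe
 * provided map pointers are at least 2-byte aligned -- so one field can
 * record either a GTT map (bit clear) or a CPU map (bit set). */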
struct kgem_partial_bo { |
struct kgem_bo base; |
void *mem; |
uint32_t used; |
uint32_t need_io : 1; |
uint32_t write : 2; |
uint32_t mmapped : 1; |
}; |
static struct kgem_bo *__kgem_freed_bo; |
static struct drm_i915_gem_exec_object2 _kgem_dummy_exec; |
static inline int bytes(struct kgem_bo *bo) |
{ |
return kgem_bo_size(bo); |
} |
#define bucket(B) (B)->size.pages.bucket |
#define num_pages(B) (B)->size.pages.count |
static void kgem_sna_reset(struct kgem *kgem) |
{ |
struct sna *sna = container_of(kgem, struct sna, kgem); |
sna->render.reset(sna); |
sna->blt_state.fill_bo = 0; |
} |
static void kgem_sna_flush(struct kgem *kgem) |
{ |
struct sna *sna = container_of(kgem, struct sna, kgem); |
sna->render.flush(sna); |
if (sna->render.solid_cache.dirty) |
sna_render_flush_solid(sna); |
} |
static int __gem_write(int fd, uint32_t handle, |
int offset, int length, |
const void *src) |
{ |
DBG(("%s(handle=%x, offset=%d, len=%d)\n", __FUNCTION__, |
handle, offset, length)); |
write_gem_object(handle, offset, length, src); |
return 0; |
} |
static int gem_write(int fd, uint32_t handle, |
int offset, int length, |
const void *src) |
{ |
u32 _offset; |
u32 _size; |
u8 *data_ptr; |
DBG(("%s(handle=%x, offset=%d, len=%d)\n", __FUNCTION__, |
handle, offset, length)); |
/* align the transfer to cachelines; fortuitously this is safe! */ |
if ((offset | length) & 63) { |
_offset = offset & ~63; |
_size = ALIGN(offset+length, 64) - _offset; |
data_ptr = (u8*)src + _offset - offset; |
} else { |
_offset = offset; |
_size = length; |
data_ptr = (u8*)src; |
} |
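/* Worked example (annotation): offset=100, length=10 gives _offset=64,
 * _size=ALIGN(110,64)-64=64 and data_ptr=src-36, i.e. the copy is
 * widened to whole cachelines and the source pointer rewound to
 * compensate -- the "fortuitously safe" over-read noted above. */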
write_gem_object(handle, _offset, _size, data_ptr); |
return 0; |
} |
static void kgem_bo_retire(struct kgem *kgem, struct kgem_bo *bo) |
{ |
DBG(("%s: handle=%x, domain=%d\n", |
__FUNCTION__, bo->handle, bo->domain)); |
assert(!kgem_busy(kgem, bo->handle)); |
if (bo->domain == DOMAIN_GPU) |
kgem_retire(kgem); |
if (bo->exec == NULL) { |
DBG(("%s: retiring bo handle=%x (needed flush? %d), rq? %d\n", |
__FUNCTION__, bo->handle, bo->needs_flush, bo->rq != NULL)); |
bo->rq = NULL; |
list_del(&bo->request); |
bo->needs_flush = bo->flush; |
} |
} |
Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo, |
const void *data, int length) |
{ |
assert(bo->refcnt); |
assert(!bo->purged); |
assert(!kgem_busy(kgem, bo->handle)); |
assert(length <= bytes(bo)); |
if (gem_write(kgem->fd, bo->handle, 0, length, data)) |
return FALSE; |
DBG(("%s: flush=%d, domain=%d\n", __FUNCTION__, bo->flush, bo->domain)); |
kgem_bo_retire(kgem, bo); |
bo->domain = DOMAIN_NONE; |
return TRUE; |
} |
static uint32_t gem_create(int fd, int num_pages) |
{ |
struct drm_i915_gem_object *obj; |
int ret; |
/* Allocate the new object */ |
obj = i915_gem_alloc_object(main_device, |
PAGE_SIZE * num_pages); |
if (obj == NULL) |
goto err1; |
ret = i915_gem_object_pin(obj, 4096, true); |
if (ret) |
goto err2; |
return (uint32_t)obj; |
err2: |
drm_gem_object_unreference(&obj->base); |
err1: |
return 0; |
} |
static bool |
kgem_bo_set_purgeable(struct kgem *kgem, struct kgem_bo *bo) |
{ |
return true; |
} |
static bool |
kgem_bo_clear_purgeable(struct kgem *kgem, struct kgem_bo *bo) |
{ |
return true; |
} |
static void gem_close(int fd, uint32_t handle) |
{ |
destroy_gem_object(handle); |
} |
/* |
constant inline static unsigned long __fls(unsigned long word) |
{ |
asm("bsr %1,%0" |
: "=r" (word) |
: "rm" (word)); |
return word; |
} |
*/ |
constant inline static int cache_bucket(int num_pages) |
{ |
return __fls(num_pages); |
} |
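/* Annotation: buckets are floor(log2(num_pages)), e.g. 1 page ->
 * bucket 0, 2-3 pages -> 1, 4-7 -> 2, 8-15 -> 3, and so on. */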
static struct kgem_bo *__kgem_bo_init(struct kgem_bo *bo, |
int handle, int num_pages) |
{ |
assert(num_pages); |
memset(bo, 0, sizeof(*bo)); |
bo->refcnt = 1; |
bo->handle = handle; |
num_pages(bo) = num_pages; |
bucket(bo) = cache_bucket(num_pages); |
bo->reusable = true; |
bo->domain = DOMAIN_CPU; |
list_init(&bo->request); |
list_init(&bo->list); |
list_init(&bo->vma); |
return bo; |
} |
static struct kgem_bo *__kgem_bo_alloc(int handle, int num_pages) |
{ |
struct kgem_bo *bo; |
if (__kgem_freed_bo) { |
bo = __kgem_freed_bo; |
__kgem_freed_bo = *(struct kgem_bo **)bo; |
} else { |
bo = malloc(sizeof(*bo)); |
if (bo == NULL) |
return NULL; |
} |
return __kgem_bo_init(bo, handle, num_pages); |
} |
static struct kgem_request _kgem_static_request; |
static struct kgem_request *__kgem_request_alloc(void) |
{ |
struct kgem_request *rq; |
rq = malloc(sizeof(*rq)); |
if (rq == NULL) |
rq = &_kgem_static_request; |
list_init(&rq->buffers); |
return rq; |
} |
static struct list_head *inactive(struct kgem *kgem, int num_pages) |
{ |
return &kgem->inactive[cache_bucket(num_pages)]; |
} |
static struct list_head *active(struct kgem *kgem, int num_pages, int tiling) |
{ |
return &kgem->active[cache_bucket(num_pages)][tiling]; |
} |
void kgem_init(struct kgem *kgem, int gen) |
{ |
struct drm_i915_gem_get_aperture aperture; |
struct drm_i915_gem_object *obj; |
size_t totalram; |
unsigned int i, j; |
int ret; |
memset(kgem, 0, sizeof(*kgem)); |
kgem->gen = gen; |
kgem->wedged = 0; |
// kgem->wedged |= DBG_NO_HW; |
obj = i915_gem_alloc_object(main_device, 4096*4); |
if (obj == NULL) |
goto err2; |
ret = i915_gem_object_pin(obj, 4096, true); |
if (ret) |
goto err3; |
kgem->batch_ptr = drm_intel_bo_map(obj, true); |
kgem->batch = kgem->batch_ptr; |
kgem->batch_idx = 0; |
kgem->batch_obj = obj; |
kgem->max_batch_size = 1024; // dwords; one 4 KiB quarter of batch_obj (was ARRAY_SIZE(kgem->batch)) |
kgem->half_cpu_cache_pages = (2048*1024) >> 13; |
list_init(&kgem->partial); |
list_init(&kgem->requests); |
list_init(&kgem->flushing); |
list_init(&kgem->large); |
for (i = 0; i < ARRAY_SIZE(kgem->inactive); i++) |
list_init(&kgem->inactive[i]); |
for (i = 0; i < ARRAY_SIZE(kgem->active); i++) { |
for (j = 0; j < ARRAY_SIZE(kgem->active[i]); j++) |
list_init(&kgem->active[i][j]); |
} |
for (i = 0; i < ARRAY_SIZE(kgem->vma); i++) { |
for (j = 0; j < ARRAY_SIZE(kgem->vma[i].inactive); j++) |
list_init(&kgem->vma[i].inactive[j]); |
} |
kgem->vma[MAP_GTT].count = -MAX_GTT_VMA_CACHE; |
kgem->vma[MAP_CPU].count = -MAX_CPU_VMA_CACHE; |
kgem->next_request = __kgem_request_alloc(); |
//#if defined(USE_VMAP) && defined(I915_PARAM_HAS_VMAP) |
// if (!DBG_NO_VMAP) |
// kgem->has_vmap = gem_param(kgem, I915_PARAM_HAS_VMAP) > 0; |
//#endif |
// DBG(("%s: using vmap=%d\n", __FUNCTION__, kgem->has_vmap)); |
if (gen < 40) { |
// if (!DBG_NO_RELAXED_FENCING) { |
// kgem->has_relaxed_fencing = |
// gem_param(kgem, I915_PARAM_HAS_RELAXED_FENCING) > 0; |
// } |
} else |
kgem->has_relaxed_fencing = 1; |
DBG(("%s: has relaxed fencing? %d\n", __FUNCTION__, |
kgem->has_relaxed_fencing)); |
kgem->has_llc = gen >= 60; |
kgem->has_cpu_bo = kgem->has_llc; |
DBG(("%s: cpu bo enabled %d: llc? %d\n", __FUNCTION__, |
kgem->has_cpu_bo, kgem->has_llc)); |
kgem->has_semaphores = false; |
// if (gen >= 60 && semaphores_enabled()) |
// kgem->has_semaphores = true; |
// DBG(("%s: semaphores enabled? %d\n", __FUNCTION__, |
// kgem->has_semaphores)); |
VG_CLEAR(aperture); |
aperture.aper_size = 64*1024*1024; |
i915_gem_get_aperture_ioctl(main_device, &aperture, NULL); |
kgem->aperture_total = aperture.aper_size; |
kgem->aperture_high = aperture.aper_size * 3/4; |
kgem->aperture_low = aperture.aper_size * 1/3; |
DBG(("%s: aperture low=%d [%d], high=%d [%d]\n", __FUNCTION__, |
kgem->aperture_low, kgem->aperture_low / (1024*1024), |
kgem->aperture_high, kgem->aperture_high / (1024*1024))); |
kgem->aperture_mappable = aperture.aper_size; |
DBG(("%s: aperture mappable=%d [%d MiB]\n", __FUNCTION__, |
kgem->aperture_mappable, kgem->aperture_mappable / (1024*1024))); |
kgem->partial_buffer_size = 64 * 1024; |
while (kgem->partial_buffer_size < kgem->aperture_mappable >> 10) |
kgem->partial_buffer_size *= 2; |
DBG(("%s: partial buffer size=%d [%d KiB]\n", __FUNCTION__, |
kgem->partial_buffer_size, kgem->partial_buffer_size / 1024)); |
kgem->min_alignment = 4; |
if (gen < 60) |
/* XXX workaround an issue where we appear to fail to |
* disable dual-stream mode */ |
kgem->min_alignment = 64; |
kgem->max_object_size = 2 * kgem->aperture_total / 3; |
kgem->max_cpu_size = kgem->max_object_size; |
kgem->max_gpu_size = kgem->max_object_size; |
if (!kgem->has_llc) |
kgem->max_gpu_size = MAX_CACHE_SIZE; |
if (gen < 40) { |
/* If we have to use fences for blitting, we have to make |
* sure we can fit them into the aperture. |
*/ |
kgem->max_gpu_size = kgem->aperture_mappable / 2; |
if (kgem->max_gpu_size > kgem->aperture_low) |
kgem->max_gpu_size = kgem->aperture_low; |
} |
if (kgem->max_gpu_size > kgem->max_cpu_size) |
kgem->max_gpu_size = kgem->max_cpu_size; |
kgem->max_upload_tile_size = kgem->aperture_mappable / 2; |
if (kgem->max_upload_tile_size > kgem->max_gpu_size / 2) |
kgem->max_upload_tile_size = kgem->max_gpu_size / 2; |
kgem->max_copy_tile_size = (MAX_CACHE_SIZE + 1)/2; |
if (kgem->max_copy_tile_size > kgem->max_gpu_size / 2) |
kgem->max_copy_tile_size = kgem->max_gpu_size / 2; |
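/* XXX placeholder until total_ram_size() is hooked up; 1 MiB makes |
* the clamps below extremely conservative. */ |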
totalram = 1024*1024; //total_ram_size(); |
if (totalram == 0) { |
DBG(("%s: total ram size unknown, assuming maximum of total aperture\n", |
__FUNCTION__)); |
totalram = kgem->aperture_total; |
} |
if (kgem->max_object_size > totalram / 2) |
kgem->max_object_size = totalram / 2; |
if (kgem->max_cpu_size > totalram / 2) |
kgem->max_cpu_size = totalram / 2; |
if (kgem->max_gpu_size > totalram / 4) |
kgem->max_gpu_size = totalram / 4; |
kgem->large_object_size = MAX_CACHE_SIZE; |
if (kgem->large_object_size > kgem->max_gpu_size) |
kgem->large_object_size = kgem->max_gpu_size; |
DBG(("%s: large object threshold=%d\n", |
__FUNCTION__, kgem->large_object_size)); |
DBG(("%s: max object size (gpu=%d, cpu=%d, tile upload=%d, copy=%d)\n", |
__FUNCTION__, |
kgem->max_gpu_size, kgem->max_cpu_size, |
kgem->max_upload_tile_size, kgem->max_copy_tile_size)); |
/* Convert the aperture thresholds to pages */ |
kgem->aperture_low /= PAGE_SIZE; |
kgem->aperture_high /= PAGE_SIZE; |
// kgem->fence_max = gem_param(kgem, I915_PARAM_NUM_FENCES_AVAIL) - 2; |
// if ((int)kgem->fence_max < 0) |
kgem->fence_max = 5; /* minimum safe value for all hw */ |
DBG(("%s: max fences=%d\n", __FUNCTION__, kgem->fence_max)); |
err3: |
err2: |
return; |
} |
static struct drm_i915_gem_exec_object2 * |
kgem_add_handle(struct kgem *kgem, struct kgem_bo *bo) |
{ |
struct drm_i915_gem_exec_object2 *exec; |
DBG(("%s: handle=%d, index=%d\n", |
__FUNCTION__, bo->handle, kgem->nexec)); |
assert(kgem->nexec < ARRAY_SIZE(kgem->exec)); |
exec = memset(&kgem->exec[kgem->nexec++], 0, sizeof(*exec)); |
exec->handle = bo->handle; |
exec->offset = bo->presumed_offset; |
kgem->aperture += num_pages(bo); |
return exec; |
} |
void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo) |
{ |
bo->exec = kgem_add_handle(kgem, bo); |
bo->rq = kgem->next_request; |
list_move(&bo->request, &kgem->next_request->buffers); |
/* XXX is it worth working around gcc here? */ |
kgem->flush |= bo->flush; |
kgem->sync |= bo->sync; |
kgem->scanout |= bo->scanout; |
} |
static uint32_t kgem_end_batch(struct kgem *kgem) |
{ |
// kgem->context_switch(kgem, KGEM_NONE); |
kgem->batch[kgem->nbatch++] = MI_BATCH_BUFFER_END; |
if (kgem->nbatch & 1) |
kgem->batch[kgem->nbatch++] = MI_NOOP; |
return kgem->nbatch; |
} |
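/* Relocations recorded with target_handle == 0 refer to the batch |
* buffer itself; patch them once the batch's placement is known. */ |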
static void kgem_fixup_self_relocs(struct kgem *kgem, struct kgem_bo *bo) |
{ |
int n; |
for (n = 0; n < kgem->nreloc; n++) |
{ |
if (kgem->reloc[n].target_handle == 0) |
{ |
kgem->reloc[n].target_handle = bo->handle; |
kgem->reloc[n].presumed_offset = bo->presumed_offset; |
kgem->batch[kgem->reloc[n].offset/sizeof(kgem->batch[0])] = |
kgem->reloc[n].delta + bo->presumed_offset; |
dbgprintf("fixup reloc %d pos %d handle %d delta %x \n", |
n, kgem->reloc[n].offset/sizeof(kgem->batch[0]), |
bo->handle, kgem->reloc[n].delta); |
} |
} |
} |
static void kgem_bo_binding_free(struct kgem *kgem, struct kgem_bo *bo) |
{ |
struct kgem_bo_binding *b; |
b = bo->binding.next; |
while (b) { |
struct kgem_bo_binding *next = b->next; |
free (b); |
b = next; |
} |
} |
static void kgem_bo_release_map(struct kgem *kgem, struct kgem_bo *bo) |
{ |
int type = IS_CPU_MAP(bo->map); |
DBG(("%s: releasing %s vma for handle=%d, count=%d\n", |
__FUNCTION__, type ? "CPU" : "GTT", |
bo->handle, kgem->vma[type].count)); |
VG(if (type) VALGRIND_FREELIKE_BLOCK(CPU_MAP(bo->map), 0)); |
// munmap(CPU_MAP(bo->map), bytes(bo)); |
bo->map = NULL; |
if (!list_is_empty(&bo->vma)) { |
list_del(&bo->vma); |
kgem->vma[type].count--; |
} |
} |
static void kgem_bo_free(struct kgem *kgem, struct kgem_bo *bo) |
{ |
DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle)); |
assert(bo->refcnt == 0); |
assert(bo->exec == NULL); |
kgem_bo_binding_free(kgem, bo); |
if (bo->map) |
kgem_bo_release_map(kgem, bo); |
assert(list_is_empty(&bo->vma)); |
list_del(&bo->list); |
list_del(&bo->request); |
gem_close(kgem->fd, bo->handle); |
if (!bo->io) { |
*(struct kgem_bo **)bo = __kgem_freed_bo; |
__kgem_freed_bo = bo; |
} else |
free(bo); |
} |
inline static void kgem_bo_move_to_inactive(struct kgem *kgem, |
struct kgem_bo *bo) |
{ |
assert(!kgem_busy(kgem, bo->handle)); |
assert(!bo->proxy); |
assert(!bo->io); |
assert(!bo->needs_flush); |
assert(bo->rq == NULL); |
assert(bo->domain != DOMAIN_GPU); |
if (bucket(bo) >= NUM_CACHE_BUCKETS) { |
kgem_bo_free(kgem, bo); |
return; |
} |
list_move(&bo->list, &kgem->inactive[bucket(bo)]); |
if (bo->map) { |
int type = IS_CPU_MAP(bo->map); |
if (bucket(bo) >= NUM_CACHE_BUCKETS || |
(!type && !kgem_bo_is_mappable(kgem, bo))) { |
list_del(&bo->vma); |
// munmap(CPU_MAP(bo->map), bytes(bo)); |
bo->map = NULL; |
} |
if (bo->map) { |
list_move(&bo->vma, &kgem->vma[type].inactive[bucket(bo)]); |
kgem->vma[type].count++; |
} |
} |
kgem->need_expire = true; |
} |
inline static void kgem_bo_remove_from_inactive(struct kgem *kgem, |
struct kgem_bo *bo) |
{ |
list_del(&bo->list); |
assert(bo->rq == NULL); |
if (bo->map) { |
assert(!list_is_empty(&bo->vma)); |
list_del(&bo->vma); |
kgem->vma[IS_CPU_MAP(bo->map)].count--; |
} |
} |
inline static void kgem_bo_remove_from_active(struct kgem *kgem, |
struct kgem_bo *bo) |
{ |
list_del(&bo->list); |
if (bo->rq == &_kgem_static_request) |
list_del(&bo->request); |
assert(list_is_empty(&bo->vma)); |
} |
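/* Drop the last reference: recycle the bo into the active or |
* inactive cache for reuse where possible, otherwise free it. */ |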
static void __kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) |
{ |
DBG(("%s: handle=%d\n", __FUNCTION__, bo->handle)); |
assert(list_is_empty(&bo->list)); |
assert(bo->refcnt == 0); |
bo->binding.offset = 0; |
if (NO_CACHE) |
goto destroy; |
if (bo->io) { |
struct kgem_bo *base; |
base = malloc(sizeof(*base)); |
if (base) { |
DBG(("%s: transferring io handle=%d to bo\n", |
__FUNCTION__, bo->handle)); |
/* transfer the handle to a minimum bo */ |
memcpy(base, bo, sizeof (*base)); |
base->reusable = true; |
base->io = false; |
list_init(&base->list); |
list_replace(&bo->request, &base->request); |
list_replace(&bo->vma, &base->vma); |
free(bo); |
bo = base; |
} |
} |
if (!bo->reusable) { |
DBG(("%s: handle=%d, not reusable\n", |
__FUNCTION__, bo->handle)); |
goto destroy; |
} |
if (!kgem->has_llc && IS_CPU_MAP(bo->map) && bo->domain != DOMAIN_CPU) |
kgem_bo_release_map(kgem, bo); |
assert(list_is_empty(&bo->vma)); |
assert(list_is_empty(&bo->list)); |
assert(bo->vmap == false && bo->sync == false); |
assert(bo->io == false); |
bo->scanout = bo->flush = false; |
if (bo->rq) { |
struct list *cache; |
DBG(("%s: handle=%d -> active\n", __FUNCTION__, bo->handle)); |
if (bucket(bo) < NUM_CACHE_BUCKETS) |
cache = &kgem->active[bucket(bo)][bo->tiling]; |
else |
cache = &kgem->large; |
list_add(&bo->list, cache); |
return; |
} |
assert(bo->exec == NULL); |
assert(list_is_empty(&bo->request)); |
/* |
if (bo->needs_flush) { |
if ((bo->needs_flush = kgem_busy(kgem, bo->handle))) { |
struct list *cache; |
DBG(("%s: handle=%d -> flushing\n", |
__FUNCTION__, bo->handle)); |
list_add(&bo->request, &kgem->flushing); |
if (bucket(bo) < NUM_CACHE_BUCKETS) |
cache = &kgem->active[bucket(bo)][bo->tiling]; |
else |
cache = &kgem->large; |
list_add(&bo->list, cache); |
bo->rq = &_kgem_static_request; |
return; |
} |
bo->domain = DOMAIN_NONE; |
} |
*/ |
if (!IS_CPU_MAP(bo->map)) { |
if (!kgem_bo_set_purgeable(kgem, bo)) |
goto destroy; |
if (!kgem->has_llc && bo->domain == DOMAIN_CPU) |
goto destroy; |
DBG(("%s: handle=%d, purged\n", |
__FUNCTION__, bo->handle)); |
} |
DBG(("%s: handle=%d -> inactive\n", __FUNCTION__, bo->handle)); |
kgem_bo_move_to_inactive(kgem, bo); |
return; |
destroy: |
if (!bo->exec) |
kgem_bo_free(kgem, bo); |
} |
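/* Reap completed work: drain the flushing list and any finished |
* requests, moving reusable bos to the inactive cache and freeing |
* the rest. Returns true if anything was retired. */ |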
bool kgem_retire(struct kgem *kgem) |
{ |
struct kgem_bo *bo, *next; |
bool retired = false; |
DBG(("%s\n", __FUNCTION__)); |
list_for_each_entry_safe(bo, next, &kgem->flushing, request) { |
assert(bo->refcnt == 0); |
assert(bo->rq == &_kgem_static_request); |
assert(bo->exec == NULL); |
// if (kgem_busy(kgem, bo->handle)) |
// break; |
DBG(("%s: moving %d from flush to inactive\n", |
__FUNCTION__, bo->handle)); |
if (kgem_bo_set_purgeable(kgem, bo)) { |
bo->needs_flush = false; |
bo->domain = DOMAIN_NONE; |
bo->rq = NULL; |
list_del(&bo->request); |
kgem_bo_move_to_inactive(kgem, bo); |
} else |
kgem_bo_free(kgem, bo); |
retired = true; |
} |
while (!list_is_empty(&kgem->requests)) { |
struct kgem_request *rq; |
rq = list_first_entry(&kgem->requests, |
struct kgem_request, |
list); |
// if (kgem_busy(kgem, rq->bo->handle)) |
// break; |
DBG(("%s: request %d complete\n", |
__FUNCTION__, rq->bo->handle)); |
while (!list_is_empty(&rq->buffers)) { |
bo = list_first_entry(&rq->buffers, |
struct kgem_bo, |
request); |
assert(bo->rq == rq); |
assert(bo->exec == NULL); |
assert(bo->domain == DOMAIN_GPU); |
list_del(&bo->request); |
bo->rq = NULL; |
// if (bo->needs_flush) |
// bo->needs_flush = kgem_busy(kgem, bo->handle); |
if (!bo->needs_flush) |
bo->domain = DOMAIN_NONE; |
if (bo->refcnt) |
continue; |
if (!bo->reusable) { |
DBG(("%s: closing %d\n", |
__FUNCTION__, bo->handle)); |
kgem_bo_free(kgem, bo); |
continue; |
} |
if (bo->needs_flush) { |
DBG(("%s: moving %d to flushing\n", |
__FUNCTION__, bo->handle)); |
list_add(&bo->request, &kgem->flushing); |
bo->rq = &_kgem_static_request; |
} else if (kgem_bo_set_purgeable(kgem, bo)) { |
DBG(("%s: moving %d to inactive\n", |
__FUNCTION__, bo->handle)); |
kgem_bo_move_to_inactive(kgem, bo); |
retired = true; |
} else { |
DBG(("%s: closing %d\n", |
__FUNCTION__, bo->handle)); |
kgem_bo_free(kgem, bo); |
} |
} |
rq->bo->refcnt--; |
assert(rq->bo->refcnt == 0); |
assert(rq->bo->rq == NULL); |
assert(list_is_empty(&rq->bo->request)); |
if (kgem_bo_set_purgeable(kgem, rq->bo)) { |
kgem_bo_move_to_inactive(kgem, rq->bo); |
retired = true; |
} else { |
DBG(("%s: closing %d\n", |
__FUNCTION__, rq->bo->handle)); |
kgem_bo_free(kgem, rq->bo); |
} |
list_del(&rq->list); |
free(rq); |
} |
kgem->need_retire = !list_is_empty(&kgem->requests); |
DBG(("%s -- need_retire=%d\n", __FUNCTION__, kgem->need_retire)); |
kgem->retire(kgem); |
return retired; |
} |
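/* The batch is filled upwards from index 0 while surface state is |
* allocated downwards from the end of the buffer. XXX: batch is now |
* a pointer, so the sizeof(kgem->batch) uses below yield the size |
* of a pointer, not of the batch; this appears harmless only while |
* the function stays uncalled in this port. */ |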
static int kgem_batch_write(struct kgem *kgem, uint32_t handle, uint32_t size) |
{ |
int ret; |
assert(!kgem_busy(kgem, handle)); |
/* If there is no surface data, just upload the batch */ |
if (kgem->surface == kgem->max_batch_size) |
return gem_write(kgem->fd, handle, |
0, sizeof(uint32_t)*kgem->nbatch, |
kgem->batch); |
/* Are the batch pages conjoint with the surface pages? */ |
if (kgem->surface < kgem->nbatch + PAGE_SIZE/4) { |
assert(size == sizeof(kgem->batch)); |
return gem_write(kgem->fd, handle, |
0, sizeof(kgem->batch), |
kgem->batch); |
} |
/* Disjoint surface/batch, upload separately */ |
ret = gem_write(kgem->fd, handle, |
0, sizeof(uint32_t)*kgem->nbatch, |
kgem->batch); |
if (ret) |
return ret; |
assert(kgem->nbatch*sizeof(uint32_t) <= |
sizeof(uint32_t)*kgem->surface - (sizeof(kgem->batch)-size)); |
return __gem_write(kgem->fd, handle, |
sizeof(uint32_t)*kgem->surface - (sizeof(kgem->batch)-size), |
sizeof(kgem->batch) - sizeof(uint32_t)*kgem->surface, |
kgem->batch + kgem->surface); |
} |
void kgem_reset(struct kgem *kgem) |
{ |
// ENTER(); |
kgem->nfence = 0; |
kgem->nexec = 0; |
kgem->nreloc = 0; |
kgem->aperture = 0; |
kgem->aperture_fenced = 0; |
kgem->nbatch = 0; |
kgem->surface = kgem->max_batch_size; |
kgem->mode = KGEM_NONE; |
kgem->flush = 0; |
kgem->scanout = 0; |
kgem->batch = kgem->batch_ptr+1024*kgem->batch_idx; |
kgem->next_request = __kgem_request_alloc(); |
kgem_sna_reset(kgem); |
// dbgprintf("surface %x\n", kgem->surface); |
// LEAVE(); |
} |
static int compact_batch_surface(struct kgem *kgem) |
{ |
int size, shrink, n; |
/* See if we can pack the contents into one or two pages */ |
size = kgem->max_batch_size - kgem->surface + kgem->nbatch; |
if (size > 2048) |
return sizeof(kgem->batch); |
else if (size > 1024) |
size = 8192, shrink = 2*4096; |
else |
size = 4096, shrink = 3*4096; |
for (n = 0; n < kgem->nreloc; n++) { |
if (kgem->reloc[n].read_domains == I915_GEM_DOMAIN_INSTRUCTION && |
kgem->reloc[n].target_handle == 0) |
kgem->reloc[n].delta -= shrink; |
if (kgem->reloc[n].offset >= size) |
kgem->reloc[n].offset -= shrink; |
} |
return size; |
} |
int exec_batch(struct drm_device *dev, struct intel_ring_buffer *ring, |
batchbuffer_t *exec); |
void _kgem_submit(struct kgem *kgem, batchbuffer_t *exb) |
{ |
struct kgem_request *rq; |
uint32_t batch_end; |
int size; |
assert(!DBG_NO_HW); |
assert(kgem->nbatch); |
assert(kgem->nbatch <= KGEM_BATCH_SIZE(kgem)); |
assert(kgem->nbatch <= kgem->surface); |
batch_end = kgem_end_batch(kgem); |
kgem_sna_flush(kgem); |
DBG(("batch[%d/%d]: %d %d %d, nreloc=%d, nexec=%d, nfence=%d, aperture=%d\n", |
kgem->mode, kgem->ring, batch_end, kgem->nbatch, kgem->surface, |
kgem->nreloc, kgem->nexec, kgem->nfence, kgem->aperture)); |
assert(kgem->nbatch <= kgem->max_batch_size); |
assert(kgem->nbatch <= kgem->surface); |
assert(kgem->nreloc <= ARRAY_SIZE(kgem->reloc)); |
assert(kgem->nexec < ARRAY_SIZE(kgem->exec)); |
assert(kgem->nfence <= kgem->fence_max); |
// kgem_finish_partials(kgem); |
rq = kgem->next_request; |
// if (kgem->surface != kgem->max_batch_size) |
// size = compact_batch_surface(kgem); |
// else |
size = kgem->nbatch * sizeof(kgem->batch[0]); /* currently unused: batch compaction above is disabled */ |
#if 0 |
{ |
int i; |
dbgprintf("\nDump batch\n\n"); |
for(i=0; i < kgem->nbatch; i++) |
{ |
dbgprintf("\t0x%08x,\t/* %d */\n", |
kgem->batch[i], i); |
} |
dbgprintf("\ndone\n"); |
}; |
#endif |
exb->batch = kgem->batch_obj; |
exb->exec_start = kgem->batch_obj->gtt_offset+kgem->batch_idx*4096; |
exb->exec_len = sizeof(uint32_t)*kgem->nbatch; |
exec_batch(main_device, NULL, exb); |
// if (kgem->wedged) |
// kgem_cleanup(kgem); |
kgem->batch_idx++; |
kgem->batch_idx &= 3; /* cycle through the four 4 KiB sub-batches of batch_obj */ |
kgem->flush_now = kgem->scanout; |
kgem_reset(kgem); |
assert(kgem->next_request != NULL); |
} |
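/* Search the caches for a reusable linear bo of at least num_pages, |
* preferring one whose existing CPU/GTT mapping matches the |
* CREATE_* flags; "first" remembers an adequately sized bo with the |
* wrong (or no) mapping as a fallback. */ |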
static struct kgem_bo * |
search_linear_cache(struct kgem *kgem, unsigned int num_pages, unsigned flags) |
{ |
struct kgem_bo *bo, *first = NULL; |
bool use_active = (flags & CREATE_INACTIVE) == 0; |
struct list_head *cache; |
if (num_pages >= MAX_CACHE_SIZE / PAGE_SIZE) |
return NULL; |
if (!use_active && |
list_is_empty(inactive(kgem, num_pages)) && |
!list_is_empty(active(kgem, num_pages, I915_TILING_NONE)) && |
!kgem_retire(kgem)) |
return NULL; |
if (!use_active && flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { |
int for_cpu = !!(flags & CREATE_CPU_MAP); |
cache = &kgem->vma[for_cpu].inactive[cache_bucket(num_pages)]; |
list_for_each_entry(bo, cache, vma) { |
assert(IS_CPU_MAP(bo->map) == for_cpu); |
assert(bucket(bo) == cache_bucket(num_pages)); |
if (num_pages > num_pages(bo)) { |
DBG(("inactive too small: %d < %d\n", |
num_pages(bo), num_pages)); |
continue; |
} |
if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) { |
kgem_bo_free(kgem, bo); |
break; |
} |
// if (I915_TILING_NONE != bo->tiling && |
// gem_set_tiling(kgem->fd, bo->handle, |
// I915_TILING_NONE, 0) != I915_TILING_NONE) |
// continue; |
kgem_bo_remove_from_inactive(kgem, bo); |
bo->tiling = I915_TILING_NONE; |
bo->pitch = 0; |
bo->delta = 0; |
DBG((" %s: found handle=%d (num_pages=%d) in linear vma cache\n", |
__FUNCTION__, bo->handle, num_pages(bo))); |
assert(use_active || bo->domain != DOMAIN_GPU); |
assert(!bo->needs_flush); |
//assert(!kgem_busy(kgem, bo->handle)); |
return bo; |
} |
} |
cache = use_active ? active(kgem, num_pages, I915_TILING_NONE) : inactive(kgem, num_pages); |
list_for_each_entry(bo, cache, list) { |
assert(bo->refcnt == 0); |
assert(bo->reusable); |
assert(!!bo->rq == !!use_active); |
if (num_pages > num_pages(bo)) |
continue; |
if (use_active && bo->tiling != I915_TILING_NONE) |
continue; |
if (bo->purged && !kgem_bo_clear_purgeable(kgem, bo)) { |
kgem_bo_free(kgem, bo); |
break; |
} |
/* |
if (I915_TILING_NONE != bo->tiling) { |
if (use_active) |
continue; |
if (gem_set_tiling(kgem->fd, bo->handle, |
I915_TILING_NONE, 0) != I915_TILING_NONE) |
continue; |
bo->tiling = I915_TILING_NONE; |
} |
*/ |
if (bo->map) { |
if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { |
int for_cpu = !!(flags & CREATE_CPU_MAP); |
if (IS_CPU_MAP(bo->map) != for_cpu) { |
if (first != NULL) |
break; |
first = bo; |
continue; |
} |
} else { |
if (first != NULL) |
break; |
first = bo; |
continue; |
} |
} else { |
if (flags & (CREATE_CPU_MAP | CREATE_GTT_MAP)) { |
if (first != NULL) |
break; |
first = bo; |
continue; |
} |
} |
if (use_active) |
kgem_bo_remove_from_active(kgem, bo); |
else |
kgem_bo_remove_from_inactive(kgem, bo); |
assert(bo->tiling == I915_TILING_NONE); |
bo->pitch = 0; |
bo->delta = 0; |
DBG((" %s: found handle=%d (num_pages=%d) in linear %s cache\n", |
__FUNCTION__, bo->handle, num_pages(bo), |
use_active ? "active" : "inactive")); |
assert(use_active || bo->domain != DOMAIN_GPU); |
assert(!bo->needs_flush || use_active); |
//assert(use_active || !kgem_busy(kgem, bo->handle)); |
return bo; |
} |
if (first) { |
assert(first->tiling == I915_TILING_NONE); |
if (use_active) |
kgem_bo_remove_from_active(kgem, first); |
else |
kgem_bo_remove_from_inactive(kgem, first); |
first->pitch = 0; |
first->delta = 0; |
DBG((" %s: found handle=%d (num_pages=%d) in linear %s cache\n", |
__FUNCTION__, first->handle, num_pages(first), |
use_active ? "active" : "inactive")); |
assert(use_active || first->domain != DOMAIN_GPU); |
assert(!first->needs_flush || use_active); |
//assert(use_active || !kgem_busy(kgem, first->handle)); |
return first; |
} |
return NULL; |
} |
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size) |
{ |
struct kgem_bo *bo; |
uint32_t handle; |
DBG(("%s(%d)\n", __FUNCTION__, size)); |
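/* convert the request from bytes into whole pages */ |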
size = (size + PAGE_SIZE - 1) / PAGE_SIZE; |
bo = search_linear_cache(kgem, size, CREATE_INACTIVE); |
if (bo) |
return kgem_bo_reference(bo); |
handle = gem_create(kgem->fd, size); |
if (handle == 0) |
return NULL; |
DBG(("%s: new handle=%x\n", __FUNCTION__, handle)); |
bo = __kgem_bo_alloc(handle, size); |
if (bo == NULL) { |
gem_close(kgem->fd, handle); |
return NULL; |
} |
struct drm_i915_gem_object *obj; |
obj = (void*)handle; |
bo->gaddr = obj->gtt_offset; |
return bo; |
} |
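/* Fence regions on pre-gen4 must be a power of two, at least |
* 512 KiB on gen2 or 1 MiB on gen3, and large enough to cover the |
* bo. */ |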
inline int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo) |
{ |
unsigned int size; |
assert(bo->tiling); |
assert(kgem->gen < 40); |
if (kgem->gen < 30) |
size = 512 * 1024; |
else |
size = 1024 * 1024; |
while (size < bytes(bo)) |
size *= 2; |
return size; |
} |
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) |
{ |
// if (bo->proxy) { |
// assert(bo->map == NULL); |
// if (bo->io && bo->exec == NULL) |
// _kgem_bo_delete_partial(kgem, bo); |
// kgem_bo_unref(kgem, bo->proxy); |
// kgem_bo_binding_free(kgem, bo); |
// _list_del(&bo->request); |
// free(bo); |
// return; |
// } |
// if (bo->vmap) |
// kgem_bo_sync__cpu(kgem, bo); |
__kgem_bo_destroy(kgem, bo); |
} |
void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo) |
{ |
/* The kernel will emit a flush *and* update its own flushing lists. */ |
// kgem_busy(kgem, bo->handle); |
} |
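/* Takes a NULL-terminated list of bos and checks whether adding |
* them all to the current batch still fits within the aperture |
* thresholds and the exec object limit. */ |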
bool kgem_check_bo(struct kgem *kgem, ...) |
{ |
va_list ap; |
struct kgem_bo *bo; |
int num_exec = 0; |
int num_pages = 0; |
va_start(ap, kgem); |
while ((bo = va_arg(ap, struct kgem_bo *))) { |
if (bo->exec) |
continue; |
if (bo->proxy) { |
bo = bo->proxy; |
if (bo->exec) |
continue; |
} |
num_pages += num_pages(bo); |
num_exec++; |
} |
va_end(ap); |
if (!num_pages) |
return true; |
if (kgem->aperture > kgem->aperture_low) |
return false; |
if (num_pages + kgem->aperture > kgem->aperture_high) |
return false; |
if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) |
return false; |
return true; |
} |
/* |
bool kgem_check_bo_fenced(struct kgem *kgem, ...) |
{ |
va_list ap; |
struct kgem_bo *bo; |
int num_fence = 0; |
int num_exec = 0; |
int num_pages = 0; |
int fenced_size = 0; |
va_start(ap, kgem); |
while ((bo = va_arg(ap, struct kgem_bo *))) { |
if (bo->proxy) |
bo = bo->proxy; |
if (bo->exec) { |
if (kgem->gen >= 40 || bo->tiling == I915_TILING_NONE) |
continue; |
if ((bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) { |
fenced_size += kgem_bo_fenced_size(kgem, bo); |
num_fence++; |
} |
continue; |
} |
num_pages += num_pages(bo); |
num_exec++; |
if (kgem->gen < 40 && bo->tiling) { |
fenced_size += kgem_bo_fenced_size(kgem, bo); |
num_fence++; |
} |
} |
va_end(ap); |
if (fenced_size + kgem->aperture_fenced > kgem->aperture_mappable) |
return false; |
if (kgem->nfence + num_fence > kgem->fence_max) |
return false; |
if (!num_pages) |
return true; |
if (kgem->aperture > kgem->aperture_low) |
return false; |
if (num_pages + kgem->aperture > kgem->aperture_high) |
return false; |
if (kgem->nexec + num_exec >= KGEM_EXEC_SIZE(kgem)) |
return false; |
return true; |
} |
*/ |
#if 0 |
uint32_t kgem_add_reloc(struct kgem *kgem, |
uint32_t pos, |
struct kgem_bo *bo, |
uint32_t read_write_domain, |
uint32_t delta) |
{ |
int index; |
DBG(("%s: handle=%d, pos=%d, delta=%d, domains=%08x\n", |
__FUNCTION__, bo ? bo->handle : 0, pos, delta, read_write_domain)); |
assert((read_write_domain & 0x7fff) == 0 || bo != NULL); |
index = kgem->nreloc++; |
assert(index < ARRAY_SIZE(kgem->reloc)); |
kgem->reloc[index].offset = pos * sizeof(kgem->batch[0]); |
if (bo) { |
assert(bo->refcnt); |
assert(!bo->purged); |
delta += bo->delta; |
if (bo->proxy) { |
DBG(("%s: adding proxy for handle=%d\n", |
__FUNCTION__, bo->handle)); |
assert(bo->handle == bo->proxy->handle); |
/* need to release the cache upon batch submit */ |
list_move(&bo->request, &kgem->next_request->buffers); |
bo->exec = &_kgem_dummy_exec; |
bo = bo->proxy; |
} |
assert(!bo->purged); |
// if (bo->exec == NULL) |
// _kgem_add_bo(kgem, bo); |
// if (kgem->gen < 40 && read_write_domain & KGEM_RELOC_FENCED) { |
// if (bo->tiling && |
// (bo->exec->flags & EXEC_OBJECT_NEEDS_FENCE) == 0) { |
// assert(kgem->nfence < kgem->fence_max); |
// kgem->aperture_fenced += |
// kgem_bo_fenced_size(kgem, bo); |
// kgem->nfence++; |
// } |
// bo->exec->flags |= EXEC_OBJECT_NEEDS_FENCE; |
// } |
kgem->reloc[index].delta = delta; |
kgem->reloc[index].target_handle = bo->handle; |
kgem->reloc[index].presumed_offset = bo->presumed_offset; |
if (read_write_domain & 0x7fff) { |
DBG(("%s: marking handle=%d dirty\n", |
__FUNCTION__, bo->handle)); |
bo->needs_flush = bo->dirty = true; |
} |
delta += bo->presumed_offset; |
} else { |
kgem->reloc[index].delta = delta; |
kgem->reloc[index].target_handle = 0; |
kgem->reloc[index].presumed_offset = 0; |
} |
kgem->reloc[index].read_domains = read_write_domain >> 16; |
kgem->reloc[index].write_domain = read_write_domain & 0x7fff; |
return delta; |
} |
#endif |
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo) |
{ |
void *ptr; |
DBG(("%s: handle=%d, offset=%d, tiling=%d, map=%p, domain=%d\n", __FUNCTION__, |
bo->handle, bo->presumed_offset, bo->tiling, bo->map, bo->domain)); |
assert(!bo->purged); |
assert(bo->exec == NULL); |
assert(list_is_empty(&bo->list)); |
// if (bo->tiling == I915_TILING_NONE && |
// (kgem->has_llc || bo->domain == bo->presumed_offset)) { |
DBG(("%s: converting request for GTT map into CPU map\n", |
__FUNCTION__)); |
ptr = kgem_bo_map__cpu(kgem, bo); |
// kgem_bo_sync__cpu(kgem, bo); |
return ptr; |
// } |
#if 0 |
if (IS_CPU_MAP(bo->map)) |
kgem_bo_release_map(kgem, bo); |
ptr = bo->map; |
if (ptr == NULL) { |
assert(bytes(bo) <= kgem->aperture_mappable / 4); |
kgem_trim_vma_cache(kgem, MAP_GTT, bucket(bo)); |
ptr = gem_mmap(kgem->fd, bo->handle, bytes(bo), |
PROT_READ | PROT_WRITE); |
if (ptr == NULL) |
return NULL; |
/* Cache this mapping to avoid the overhead of an |
* excruciatingly slow GTT pagefault. This is more an |
* issue with compositing managers which need to frequently |
* flush CPU damage to their GPU bo. |
*/ |
bo->map = ptr; |
DBG(("%s: caching GTT vma for %d\n", __FUNCTION__, bo->handle)); |
} |
if (bo->domain != DOMAIN_GTT) { |
struct drm_i915_gem_set_domain set_domain; |
DBG(("%s: sync: needs_flush? %d, domain? %d\n", __FUNCTION__, |
bo->needs_flush, bo->domain)); |
/* XXX use PROT_READ to avoid the write flush? */ |
VG_CLEAR(set_domain); |
set_domain.handle = bo->handle; |
set_domain.read_domains = I915_GEM_DOMAIN_GTT; |
set_domain.write_domain = I915_GEM_DOMAIN_GTT; |
drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain); |
kgem_bo_retire(kgem, bo); |
bo->domain = DOMAIN_GTT; |
} |
#endif |
return ptr; |
} |
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo) |
{ |
// struct drm_i915_gem_mmap mmap_arg; |
DBG(("%s(handle=%d, size=%d)\n", __FUNCTION__, bo->handle, bytes(bo))); |
assert(!bo->purged); |
assert(list_is_empty(&bo->list)); |
if (IS_CPU_MAP(bo->map)) |
return CPU_MAP(bo->map); |
struct drm_i915_gem_object *obj = (void*)bo->handle; |
u8 *dst; |
int ret; |
if (obj->pin_count == 0) { |
ret = i915_gem_object_pin(obj, 4096, true); |
if (ret) |
return NULL; |
} |
dst = drm_intel_bo_map(obj, true); |
DBG(("%s: caching CPU vma for %d\n", __FUNCTION__, bo->handle)); |
bo->map = MAKE_CPU_MAP(dst); |
return (void *)dst; |
#if 0 |
if (bo->map) |
kgem_bo_release_map(kgem, bo); |
kgem_trim_vma_cache(kgem, MAP_CPU, bucket(bo)); |
VG_CLEAR(mmap_arg); |
mmap_arg.handle = bo->handle; |
mmap_arg.offset = 0; |
mmap_arg.size = bytes(bo); |
if (drmIoctl(kgem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg)) { |
ErrorF("%s: failed to mmap %d, %d bytes, into CPU domain\n", |
__FUNCTION__, bo->handle, bytes(bo)); |
return NULL; |
} |
VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, bytes(bo), 0, 1)); |
#endif |
} |
void kgem_clear_dirty(struct kgem *kgem) |
{ |
struct kgem_request *rq = kgem->next_request; |
struct kgem_bo *bo; |
list_for_each_entry(bo, &rq->buffers, request) |
bo->dirty = false; |
} |
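/* A proxy is a lightweight bo aliasing [offset, offset+length) of |
* an existing bo: it shares the target's handle and holds a |
* reference on the target for its lifetime. */ |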
struct kgem_bo *kgem_create_proxy(struct kgem_bo *target, |
int offset, int length) |
{ |
struct kgem_bo *bo; |
DBG(("%s: target handle=%d, offset=%d, length=%d, io=%d\n", |
__FUNCTION__, target->handle, offset, length, target->io)); |
bo = __kgem_bo_alloc(target->handle, length); |
if (bo == NULL) |
return NULL; |
bo->reusable = false; |
bo->size.bytes = length; |
bo->io = target->io; |
bo->dirty = target->dirty; |
bo->tiling = target->tiling; |
bo->pitch = target->pitch; |
if (target->proxy) { |
offset += target->delta; |
target = target->proxy; |
} |
bo->proxy = kgem_bo_reference(target); |
bo->delta = offset; |
bo->gaddr = offset + target->gaddr; |
return bo; |
} |
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format) |
{ |
struct kgem_bo_binding *b; |
for (b = &bo->binding; b && b->offset; b = b->next) |
if (format == b->format) |
return b->offset; |
return 0; |
} |
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset) |
{ |
struct kgem_bo_binding *b; |
for (b = &bo->binding; b; b = b->next) { |
if (b->offset) |
continue; |
b->offset = offset; |
b->format = format; |
if (b->next) |
b->next->offset = 0; |
return; |
} |
b = malloc(sizeof(*b)); |
if (b) { |
b->next = bo->binding.next; |
b->format = format; |
b->offset = offset; |
bo->binding.next = b; |
} |
} |
struct kgem_bo *create_bo(bitmap_t *bitmap) |
{ |
struct kgem_bo *bo; |
/* XXX hardcoded 1024x768x32bpp framebuffer size, in pages */ |
bo = __kgem_bo_alloc(bitmap->obj, 1024*768*4/4096); |
if (bo == NULL) |
return NULL; |
bo->gaddr = bitmap->gaddr; |
bo->pitch = bitmap->pitch; |
bo->tiling = 0; |
return bo; |
} |
/drivers/video/drm/i915/sna/kgem.h |
---|
0,0 → 1,538 |
/* |
* Copyright (c) 2011 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
* SOFTWARE. |
* |
* Authors: |
* Chris Wilson <chris@chris-wilson.co.uk> |
* |
*/ |
#ifndef KGEM_H |
#define KGEM_H |
#include "compiler.h" |
#include <linux/list.h> |
//#include <stdarg.h> |
#include <i915_drm.h> |
#if DEBUG_KGEM |
#define DBG_HDR(x) ErrorF x |
#else |
#define DBG_HDR(x) |
#endif |
struct kgem_bo { |
struct kgem_bo *proxy; |
struct list_head list; |
struct list_head request; |
struct list_head vma; |
void *map; |
uint32_t gaddr; |
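/* The low bit of map tags the mapping type: 1 = CPU, 0 = GTT. */ |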
#define IS_CPU_MAP(ptr) ((uintptr_t)(ptr) & 1) |
#define IS_GTT_MAP(ptr) (ptr && ((uintptr_t)(ptr) & 1) == 0) |
struct kgem_request *rq; |
struct drm_i915_gem_exec_object2 *exec; |
struct kgem_bo_binding { |
struct kgem_bo_binding *next; |
uint32_t format; |
uint16_t offset; |
} binding; |
uint32_t unique_id; |
uint32_t refcnt; |
uint32_t handle; |
uint32_t presumed_offset; |
uint32_t delta; |
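/* size is pages+bucket for ordinary bos, or raw bytes for proxied |
* io buffers -- see kgem_bo_size() and kgem_buffer_size(). */ |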
union { |
struct { |
uint32_t count:27; |
uint32_t bucket:5; |
#define NUM_CACHE_BUCKETS 16 |
#define MAX_CACHE_SIZE (1 << (NUM_CACHE_BUCKETS+12)) |
} pages; |
uint32_t bytes; |
} size; |
uint32_t pitch : 18; /* max 128k */ |
uint32_t tiling : 2; |
uint32_t reusable : 1; |
uint32_t dirty : 1; |
uint32_t domain : 2; |
uint32_t needs_flush : 1; |
uint32_t vmap : 1; |
uint32_t io : 1; |
uint32_t flush : 1; |
uint32_t scanout : 1; |
uint32_t sync : 1; |
uint32_t purged : 1; |
}; |
#define DOMAIN_NONE 0 |
#define DOMAIN_CPU 1 |
#define DOMAIN_GTT 2 |
#define DOMAIN_GPU 3 |
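/* A request groups every bo referenced by one batch submission so |
* that they can all be retired together once the GPU is done. */ |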
struct kgem_request { |
struct list_head list; |
struct kgem_bo *bo; |
struct list_head buffers; |
}; |
enum { |
MAP_GTT = 0, |
MAP_CPU, |
NUM_MAP_TYPES, |
}; |
struct kgem { |
int fd; |
int wedged; |
int gen; |
uint32_t unique_id; |
enum kgem_mode { |
/* order matches I915_EXEC_RING ordering */ |
KGEM_NONE = 0, |
KGEM_RENDER, |
KGEM_BSD, |
KGEM_BLT, |
} mode, ring; |
struct list_head flushing; |
struct list_head large; |
struct list_head active[NUM_CACHE_BUCKETS][3]; |
struct list_head inactive[NUM_CACHE_BUCKETS]; |
struct list_head partial; |
struct list_head requests; |
struct kgem_request *next_request; |
struct { |
struct list_head inactive[NUM_CACHE_BUCKETS]; |
int16_t count; |
} vma[NUM_MAP_TYPES]; |
uint16_t nbatch; |
uint16_t surface; |
uint16_t nexec; |
uint16_t nreloc; |
uint16_t nfence; |
uint16_t max_batch_size; |
uint32_t flush:1; |
uint32_t sync:1; |
uint32_t need_expire:1; |
uint32_t need_purge:1; |
uint32_t need_retire:1; |
uint32_t scanout:1; |
uint32_t flush_now:1; |
uint32_t busy:1; |
uint32_t has_vmap :1; |
uint32_t has_relaxed_fencing :1; |
uint32_t has_semaphores :1; |
uint32_t has_llc :1; |
uint32_t has_cpu_bo :1; |
uint16_t fence_max; |
uint16_t half_cpu_cache_pages; |
uint32_t aperture_total, aperture_high, aperture_low, aperture_mappable; |
uint32_t aperture, aperture_fenced; |
uint32_t min_alignment; |
uint32_t max_upload_tile_size, max_copy_tile_size; |
uint32_t max_gpu_size, max_cpu_size; |
uint32_t large_object_size, max_object_size; |
uint32_t partial_buffer_size; |
// void (*context_switch)(struct kgem *kgem, int new_mode); |
void (*retire)(struct kgem *kgem); |
uint32_t *batch; |
uint32_t *batch_ptr; |
int batch_idx; |
struct drm_i915_gem_object *batch_obj; |
struct drm_i915_gem_exec_object2 exec[256]; |
struct drm_i915_gem_relocation_entry reloc[384]; |
}; |
typedef struct |
{ |
struct drm_i915_gem_object *batch; |
struct list_head objects; |
u32 exec_start; |
u32 exec_len; |
} batchbuffer_t; |
#define KGEM_BATCH_RESERVED 1 |
#define KGEM_RELOC_RESERVED 4 |
#define KGEM_EXEC_RESERVED 1 |
#define KGEM_BATCH_SIZE(K) ((K)->max_batch_size-KGEM_BATCH_RESERVED) |
#define KGEM_EXEC_SIZE(K) (int)(ARRAY_SIZE((K)->exec)-KGEM_EXEC_RESERVED) |
#define KGEM_RELOC_SIZE(K) (int)(ARRAY_SIZE((K)->reloc)-KGEM_RELOC_RESERVED) |
void kgem_init(struct kgem *kgem, int gen); |
void kgem_reset(struct kgem *kgem); |
struct kgem_bo *kgem_create_map(struct kgem *kgem, |
void *ptr, uint32_t size, |
bool read_only); |
struct kgem_bo *kgem_create_for_name(struct kgem *kgem, uint32_t name); |
struct kgem_bo *kgem_create_linear(struct kgem *kgem, int size); |
struct kgem_bo *kgem_create_proxy(struct kgem_bo *target, |
int offset, int length); |
//struct kgem_bo *kgem_upload_source_image(struct kgem *kgem, |
// const void *data, |
// BoxPtr box, |
// int stride, int bpp); |
int kgem_choose_tiling(struct kgem *kgem, |
int tiling, int width, int height, int bpp); |
unsigned kgem_can_create_2d(struct kgem *kgem, int width, int height, int depth); |
#define KGEM_CAN_CREATE_GPU 0x1 |
#define KGEM_CAN_CREATE_CPU 0x2 |
#define KGEM_CAN_CREATE_LARGE 0x4 |
struct kgem_bo * |
kgem_replace_bo(struct kgem *kgem, |
struct kgem_bo *src, |
uint32_t width, |
uint32_t height, |
uint32_t pitch, |
uint32_t bpp); |
enum { |
CREATE_EXACT = 0x1, |
CREATE_INACTIVE = 0x2, |
CREATE_CPU_MAP = 0x4, |
CREATE_GTT_MAP = 0x8, |
CREATE_SCANOUT = 0x10, |
}; |
struct kgem_bo *kgem_create_2d(struct kgem *kgem, |
int width, |
int height, |
int bpp, |
int tiling, |
uint32_t flags); |
uint32_t kgem_bo_get_binding(struct kgem_bo *bo, uint32_t format); |
void kgem_bo_set_binding(struct kgem_bo *bo, uint32_t format, uint16_t offset); |
bool kgem_retire(struct kgem *kgem); |
void _kgem_submit(struct kgem *kgem, batchbuffer_t *exb); |
//static inline void kgem_submit(struct kgem *kgem) |
//{ |
// if (kgem->nbatch) |
// _kgem_submit(kgem); |
//} |
/* |
static inline void kgem_bo_submit(struct kgem *kgem, struct kgem_bo *bo) |
{ |
if (bo->exec) |
_kgem_submit(kgem); |
} |
void __kgem_flush(struct kgem *kgem, struct kgem_bo *bo); |
static inline void kgem_bo_flush(struct kgem *kgem, struct kgem_bo *bo) |
{ |
kgem_bo_submit(kgem, bo); |
if (!bo->needs_flush) |
return; |
__kgem_flush(kgem, bo); |
bo->needs_flush = false; |
} |
*/ |
static inline struct kgem_bo *kgem_bo_reference(struct kgem_bo *bo) |
{ |
bo->refcnt++; |
return bo; |
} |
void _kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo); |
static inline void kgem_bo_destroy(struct kgem *kgem, struct kgem_bo *bo) |
{ |
assert(bo->refcnt); |
if (--bo->refcnt == 0) |
_kgem_bo_destroy(kgem, bo); |
} |
void kgem_clear_dirty(struct kgem *kgem); |
static inline void kgem_set_mode(struct kgem *kgem, enum kgem_mode mode) |
{ |
assert(!kgem->wedged); |
#if DEBUG_FLUSH_BATCH |
kgem_submit(kgem); |
#endif |
if (kgem->mode == mode) |
return; |
// kgem->context_switch(kgem, mode); |
kgem->mode = mode; |
} |
static inline void _kgem_set_mode(struct kgem *kgem, enum kgem_mode mode) |
{ |
assert(kgem->mode == KGEM_NONE); |
// kgem->context_switch(kgem, mode); |
kgem->mode = mode; |
} |
static inline bool kgem_check_batch(struct kgem *kgem, int num_dwords) |
{ |
return likely(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED <= kgem->surface); |
} |
static inline bool kgem_check_reloc(struct kgem *kgem, int n) |
{ |
return likely(kgem->nreloc + n <= KGEM_RELOC_SIZE(kgem)); |
} |
static inline bool kgem_check_exec(struct kgem *kgem, int n) |
{ |
return likely(kgem->nexec + n <= KGEM_EXEC_SIZE(kgem)); |
} |
static inline bool kgem_check_batch_with_surfaces(struct kgem *kgem, |
int num_dwords, |
int num_surfaces) |
{ |
return (int)(kgem->nbatch + num_dwords + KGEM_BATCH_RESERVED) <= (int)(kgem->surface - num_surfaces*8) && |
kgem_check_reloc(kgem, num_surfaces); |
} |
static inline uint32_t *kgem_get_batch(struct kgem *kgem, int num_dwords) |
{ |
// if (!kgem_check_batch(kgem, num_dwords)) |
// _kgem_submit(kgem); |
return kgem->batch + kgem->nbatch; |
} |
static inline void kgem_advance_batch(struct kgem *kgem, int num_dwords) |
{ |
kgem->nbatch += num_dwords; |
} |
bool kgem_check_bo(struct kgem *kgem, ...) __attribute__((sentinel(0))); |
bool kgem_check_bo_fenced(struct kgem *kgem, ...) __attribute__((sentinel(0))); |
void _kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo); |
static inline void kgem_add_bo(struct kgem *kgem, struct kgem_bo *bo) |
{ |
if (bo->proxy) |
bo = bo->proxy; |
if (bo->exec == NULL) |
_kgem_add_bo(kgem, bo); |
} |
#define KGEM_RELOC_FENCED 0x8000 |
uint32_t kgem_add_reloc(struct kgem *kgem, |
uint32_t pos, |
struct kgem_bo *bo, |
uint32_t read_write_domains, |
uint32_t delta); |
void *kgem_bo_map(struct kgem *kgem, struct kgem_bo *bo); |
void *kgem_bo_map__debug(struct kgem *kgem, struct kgem_bo *bo); |
void *kgem_bo_map__cpu(struct kgem *kgem, struct kgem_bo *bo); |
void kgem_bo_sync__cpu(struct kgem *kgem, struct kgem_bo *bo); |
uint32_t kgem_bo_flink(struct kgem *kgem, struct kgem_bo *bo); |
Bool kgem_bo_write(struct kgem *kgem, struct kgem_bo *bo, |
const void *data, int length); |
int kgem_bo_fenced_size(struct kgem *kgem, struct kgem_bo *bo); |
void kgem_get_tile_size(struct kgem *kgem, int tiling, |
int *tile_width, int *tile_height, int *tile_size); |
static inline int kgem_bo_size(struct kgem_bo *bo) |
{ |
assert(!(bo->proxy && bo->io)); |
return PAGE_SIZE * bo->size.pages.count; |
} |
static inline int kgem_buffer_size(struct kgem_bo *bo) |
{ |
assert(bo->proxy && bo->io); |
return bo->size.bytes; |
} |
/* |
static inline bool kgem_bo_blt_pitch_is_ok(struct kgem *kgem, |
struct kgem_bo *bo) |
{ |
int pitch = bo->pitch; |
if (kgem->gen >= 40 && bo->tiling) |
pitch /= 4; |
if (pitch > MAXSHORT) { |
DBG(("%s: can not blt to handle=%d, adjusted pitch=%d\n", |
__FUNCTION__, bo->handle, pitch)); |
return false; |
} |
return true; |
} |
static inline bool kgem_bo_can_blt(struct kgem *kgem, |
struct kgem_bo *bo) |
{ |
if (bo->tiling == I915_TILING_Y) { |
DBG(("%s: can not blt to handle=%d, tiling=Y\n", |
__FUNCTION__, bo->handle)); |
return false; |
} |
return kgem_bo_blt_pitch_is_ok(kgem, bo); |
} |
*/ |
static inline bool kgem_bo_is_mappable(struct kgem *kgem, |
struct kgem_bo *bo) |
{ |
DBG_HDR(("%s: domain=%d, offset: %d size: %d\n", |
__FUNCTION__, bo->domain, bo->presumed_offset, kgem_bo_size(bo))); |
if (bo->domain == DOMAIN_GTT) |
return true; |
if (IS_GTT_MAP(bo->map)) |
return true; |
if (kgem->gen < 40 && bo->tiling && |
bo->presumed_offset & (kgem_bo_fenced_size(kgem, bo) - 1)) |
return false; |
if (!bo->presumed_offset) |
return kgem_bo_size(bo) <= kgem->aperture_mappable / 4; |
return bo->presumed_offset + kgem_bo_size(bo) <= kgem->aperture_mappable; |
} |
static inline bool kgem_bo_mapped(struct kgem_bo *bo) |
{ |
DBG_HDR(("%s: map=%p, tiling=%d\n", __FUNCTION__, bo->map, bo->tiling)); |
if (bo->map == NULL) |
return false; |
return IS_CPU_MAP(bo->map) == !bo->tiling; |
} |
static inline bool kgem_bo_is_busy(struct kgem_bo *bo) |
{ |
DBG_HDR(("%s: domain: %d exec? %d, rq? %d\n", |
__FUNCTION__, bo->domain, bo->exec != NULL, bo->rq != NULL)); |
assert(bo->proxy == NULL); |
return bo->rq; |
} |
/* |
static inline bool kgem_bo_map_will_stall(struct kgem *kgem, struct kgem_bo *bo) |
{ |
DBG(("%s? handle=%d, domain=%d, offset=%x, size=%x\n", |
__FUNCTION__, bo->handle, |
bo->domain, bo->presumed_offset, bo->size)); |
if (!kgem_bo_is_mappable(kgem, bo)) |
return true; |
if (kgem->wedged) |
return false; |
if (kgem_bo_is_busy(bo)) |
return true; |
if (bo->presumed_offset == 0) |
return !list_is_empty(&kgem->requests); |
return false; |
} |
*/ |
static inline bool kgem_bo_is_dirty(struct kgem_bo *bo) |
{ |
if (bo == NULL) |
return FALSE; |
return bo->dirty; |
} |
static inline void kgem_bo_mark_dirty(struct kgem_bo *bo) |
{ |
DBG_HDR(("%s: handle=%d\n", __FUNCTION__, bo->handle)); |
bo->dirty = true; |
} |
void kgem_sync(struct kgem *kgem); |
#define KGEM_BUFFER_WRITE 0x1 |
#define KGEM_BUFFER_INPLACE 0x2 |
#define KGEM_BUFFER_LAST 0x4 |
#define KGEM_BUFFER_WRITE_INPLACE (KGEM_BUFFER_WRITE | KGEM_BUFFER_INPLACE) |
struct kgem_bo *kgem_create_buffer(struct kgem *kgem, |
uint32_t size, uint32_t flags, |
void **ret); |
struct kgem_bo *kgem_create_buffer_2d(struct kgem *kgem, |
int width, int height, int bpp, |
uint32_t flags, |
void **ret); |
void kgem_buffer_read_sync(struct kgem *kgem, struct kgem_bo *bo); |
void kgem_throttle(struct kgem *kgem); |
#define MAX_INACTIVE_TIME 10 |
bool kgem_expire_cache(struct kgem *kgem); |
void kgem_purge_cache(struct kgem *kgem); |
void kgem_cleanup_cache(struct kgem *kgem); |
#if HAS_EXTRA_DEBUG |
void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch); |
#else |
static inline void __kgem_batch_debug(struct kgem *kgem, uint32_t nbatch) |
{ |
(void)kgem; |
(void)nbatch; |
} |
#endif |
#undef DBG_HDR |
u32 get_buffer_offset(uint32_t handle); |
#endif /* KGEM_H */ |
/drivers/video/drm/i915/sna/compiler.h |
---|
0,0 → 1,55 |
/* |
* Copyright (c) 2011 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
* SOFTWARE. |
* |
* Authors: |
* Chris Wilson <chris@chris-wilson.co.uk> |
* |
*/ |
#ifndef _SNA_COMPILER_H_ |
#define _SNA_COMPILER_H_ |
#if defined(__GNUC__) && (__GNUC__ > 2) && defined(__OPTIMIZE__) |
#define noinline __attribute__((noinline)) |
#define fastcall __attribute__((regparm(3))) |
#define must_check __attribute__((warn_unused_result)) |
#define constant __attribute__((const)) |
#else |
#define likely(expr) (expr) |
#define unlikely(expr) (expr) |
#define noinline |
#define fastcall |
#define must_check |
#define constant |
#endif |
#ifdef HAVE_VALGRIND |
#define VG(x) x |
#else |
#define VG(x) |
#endif |
#define VG_CLEAR(s) VG(memset(&s, 0, sizeof(s))) |
#define COMPILE_TIME_ASSERT(E) ((void)sizeof(char[1 - 2*!(E)])) |
#endif /* _SNA_COMPILER_H_ */ |
/drivers/video/drm/i915/sna/gen6_render.h |
---|
0,0 → 1,1585 |
#ifndef GEN6_RENDER_H |
#define GEN6_RENDER_H |
#define GEN6_MASK(high, low) (((1 << ((high) - (low) + 1)) - 1) << (low)) |
#define GEN6_3D(Pipeline,Opcode,Subopcode) ((3 << 29) | \ |
((Pipeline) << 27) | \ |
((Opcode) << 24) | \ |
((Subopcode) << 16)) |
#define GEN6_STATE_BASE_ADDRESS GEN6_3D(0, 1, 1) |
#define GEN6_STATE_SIP GEN6_3D(0, 1, 2) |
#define GEN6_PIPELINE_SELECT GEN6_3D(1, 1, 4) |
#define GEN6_MEDIA_STATE_POINTERS GEN6_3D(2, 0, 0) |
#define GEN6_MEDIA_OBJECT GEN6_3D(2, 1, 0) |
#define GEN6_3DSTATE_BINDING_TABLE_POINTERS GEN6_3D(3, 0, 1) |
# define GEN6_3DSTATE_BINDING_TABLE_MODIFY_PS (1 << 12)/* for GEN6 */ |
# define GEN6_3DSTATE_BINDING_TABLE_MODIFY_GS (1 << 9) /* for GEN6 */ |
# define GEN6_3DSTATE_BINDING_TABLE_MODIFY_VS (1 << 8) /* for GEN6 */ |
#define GEN6_3DSTATE_VERTEX_BUFFERS GEN6_3D(3, 0, 8) |
#define GEN6_3DSTATE_VERTEX_ELEMENTS GEN6_3D(3, 0, 9) |
#define GEN6_3DSTATE_INDEX_BUFFER GEN6_3D(3, 0, 0xa) |
#define GEN6_3DSTATE_VF_STATISTICS GEN6_3D(3, 0, 0xb) |
#define GEN6_3DSTATE_DRAWING_RECTANGLE GEN6_3D(3, 1, 0) |
#define GEN6_3DSTATE_CONSTANT_COLOR GEN6_3D(3, 1, 1) |
#define GEN6_3DSTATE_SAMPLER_PALETTE_LOAD GEN6_3D(3, 1, 2) |
#define GEN6_3DSTATE_CHROMA_KEY GEN6_3D(3, 1, 4) |
#define GEN6_3DSTATE_DEPTH_BUFFER GEN6_3D(3, 1, 5) |
# define GEN6_3DSTATE_DEPTH_BUFFER_TYPE_SHIFT 29 |
# define GEN6_3DSTATE_DEPTH_BUFFER_FORMAT_SHIFT 18 |
#define GEN6_3DSTATE_POLY_STIPPLE_OFFSET GEN6_3D(3, 1, 6) |
#define GEN6_3DSTATE_POLY_STIPPLE_PATTERN GEN6_3D(3, 1, 7) |
#define GEN6_3DSTATE_LINE_STIPPLE GEN6_3D(3, 1, 8) |
#define GEN6_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP GEN6_3D(3, 1, 9) |
/* These two are BLC and CTG only, not BW or CL */ |
#define GEN6_3DSTATE_AA_LINE_PARAMS GEN6_3D(3, 1, 0xa) |
#define GEN6_3DSTATE_GS_SVB_INDEX GEN6_3D(3, 1, 0xb) |
#define GEN6_3DPRIMITIVE GEN6_3D(3, 3, 0) |
#define GEN6_3DSTATE_CLEAR_PARAMS GEN6_3D(3, 1, 0x10) |
/* DW1 */ |
# define GEN6_3DSTATE_DEPTH_CLEAR_VALID (1 << 15) |
#define GEN6_3DSTATE_SAMPLER_STATE_POINTERS GEN6_3D(3, 0, 0x02) |
# define GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS (1 << 12) |
# define GEN6_3DSTATE_SAMPLER_STATE_MODIFY_GS (1 << 9) |
# define GEN6_3DSTATE_SAMPLER_STATE_MODIFY_VS (1 << 8) |
#define GEN6_3DSTATE_URB GEN6_3D(3, 0, 0x05) |
/* DW1 */ |
# define GEN6_3DSTATE_URB_VS_SIZE_SHIFT 16 |
# define GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT 0 |
/* DW2 */ |
# define GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT 8 |
# define GEN6_3DSTATE_URB_GS_SIZE_SHIFT 0 |
#define GEN6_3DSTATE_VIEWPORT_STATE_POINTERS GEN6_3D(3, 0, 0x0d) |
# define GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC (1 << 12) |
# define GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_SF (1 << 11) |
# define GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CLIP (1 << 10) |
#define GEN6_3DSTATE_CC_STATE_POINTERS GEN6_3D(3, 0, 0x0e) |
#define GEN6_3DSTATE_VS GEN6_3D(3, 0, 0x10) |
#define GEN6_3DSTATE_GS GEN6_3D(3, 0, 0x11) |
/* DW4 */ |
# define GEN6_3DSTATE_GS_DISPATCH_START_GRF_SHIFT 0 |
#define GEN6_3DSTATE_CLIP GEN6_3D(3, 0, 0x12) |
#define GEN6_3DSTATE_SF GEN6_3D(3, 0, 0x13) |
/* DW1 */ |
# define GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT 22 |
# define GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT 11 |
# define GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT 4 |
/* DW2 */ |
/* DW3 */ |
# define GEN6_3DSTATE_SF_CULL_BOTH (0 << 29) |
# define GEN6_3DSTATE_SF_CULL_NONE (1 << 29) |
# define GEN6_3DSTATE_SF_CULL_FRONT (2 << 29) |
# define GEN6_3DSTATE_SF_CULL_BACK (3 << 29) |
/* DW4 */ |
# define GEN6_3DSTATE_SF_TRI_PROVOKE_SHIFT 29 |
# define GEN6_3DSTATE_SF_LINE_PROVOKE_SHIFT 27 |
# define GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT 25 |
#define GEN6_3DSTATE_WM GEN6_3D(3, 0, 0x14) |
/* DW2 */ |
# define GEN6_3DSTATE_WM_SAMPLER_COUNT_SHIFT 27 |
# define GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT 18 |
/* DW4 */ |
# define GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT 16 |
/* DW5 */ |
# define GEN6_3DSTATE_WM_MAX_THREADS_SHIFT 25 |
# define GEN6_3DSTATE_WM_DISPATCH_ENABLE (1 << 19) |
# define GEN6_3DSTATE_WM_16_DISPATCH_ENABLE (1 << 1) |
# define GEN6_3DSTATE_WM_8_DISPATCH_ENABLE (1 << 0) |
/* DW6 */ |
# define GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT 20 |
# define GEN6_3DSTATE_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC (1 << 15) |
# define GEN6_3DSTATE_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC (1 << 14) |
# define GEN6_3DSTATE_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC (1 << 13) |
# define GEN6_3DSTATE_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC (1 << 12) |
# define GEN6_3DSTATE_WM_PERSPECTIVE_CENTROID_BARYCENTRIC (1 << 11) |
# define GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC (1 << 10) |
#define GEN6_3DSTATE_CONSTANT_VS GEN6_3D(3, 0, 0x15) |
#define GEN6_3DSTATE_CONSTANT_GS GEN6_3D(3, 0, 0x16) |
#define GEN6_3DSTATE_CONSTANT_PS GEN6_3D(3, 0, 0x17) |
#define GEN6_3DSTATE_SAMPLE_MASK GEN6_3D(3, 0, 0x18) |
#define GEN6_3DSTATE_MULTISAMPLE GEN6_3D(3, 1, 0x0d) |
/* DW1 */ |
# define GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER (0 << 4) |
# define GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_UPPER_LEFT (1 << 4) |
# define GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1 (0 << 1) |
# define GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_4 (2 << 1) |
# define GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_8 (3 << 1) |
#define PIPELINE_SELECT_3D 0 |
#define PIPELINE_SELECT_MEDIA 1 |
/* for GEN6_STATE_BASE_ADDRESS */ |
#define BASE_ADDRESS_MODIFY (1 << 0) |
/* VERTEX_BUFFER_STATE Structure */ |
#define VB0_BUFFER_INDEX_SHIFT 26 |
#define VB0_VERTEXDATA (0 << 20) |
#define VB0_INSTANCEDATA (1 << 20) |
#define VB0_BUFFER_PITCH_SHIFT 0 |
/* VERTEX_ELEMENT_STATE Structure */ |
#define VE0_VERTEX_BUFFER_INDEX_SHIFT 26 /* for GEN6 */ |
#define VE0_VALID (1 << 25) /* for GEN6 */ |
#define VE0_FORMAT_SHIFT 16 |
#define VE0_OFFSET_SHIFT 0 |
#define VE1_VFCOMPONENT_0_SHIFT 28 |
#define VE1_VFCOMPONENT_1_SHIFT 24 |
#define VE1_VFCOMPONENT_2_SHIFT 20 |
#define VE1_VFCOMPONENT_3_SHIFT 16 |
#define VE1_DESTINATION_ELEMENT_OFFSET_SHIFT 0 |
/* 3DPRIMITIVE bits */ |
#define GEN6_3DPRIMITIVE_VERTEX_SEQUENTIAL (0 << 15) |
#define GEN6_3DPRIMITIVE_VERTEX_RANDOM (1 << 15) |
/* Primitive types are in gen6_defines.h */ |
#define GEN6_3DPRIMITIVE_TOPOLOGY_SHIFT 10 |
#define GEN6_SVG_CTL 0x7400 |
#define GEN6_SVG_CTL_GS_BA (0 << 8) |
#define GEN6_SVG_CTL_SS_BA (1 << 8) |
#define GEN6_SVG_CTL_IO_BA (2 << 8) |
#define GEN6_SVG_CTL_GS_AUB (3 << 8) |
#define GEN6_SVG_CTL_IO_AUB (4 << 8) |
#define GEN6_SVG_CTL_SIP (5 << 8) |
#define GEN6_SVG_RDATA 0x7404 |
#define GEN6_SVG_WORK_CTL 0x7408 |
#define GEN6_VF_CTL 0x7500 |
#define GEN6_VF_CTL_SNAPSHOT_COMPLETE (1 << 31) |
#define GEN6_VF_CTL_SNAPSHOT_MUX_SELECT_THREADID (0 << 8) |
#define GEN6_VF_CTL_SNAPSHOT_MUX_SELECT_VF_DEBUG (1 << 8) |
#define GEN6_VF_CTL_SNAPSHOT_TYPE_VERTEX_SEQUENCE (0 << 4) |
#define GEN6_VF_CTL_SNAPSHOT_TYPE_VERTEX_INDEX (1 << 4) |
#define GEN6_VF_CTL_SKIP_INITIAL_PRIMITIVES (1 << 3) |
#define GEN6_VF_CTL_MAX_PRIMITIVES_LIMIT_ENABLE (1 << 2) |
#define GEN6_VF_CTL_VERTEX_RANGE_LIMIT_ENABLE (1 << 1) |
#define GEN6_VF_CTL_SNAPSHOT_ENABLE (1 << 0) |
#define GEN6_VF_STRG_VAL 0x7504 |
#define GEN6_VF_STR_VL_OVR 0x7508 |
#define GEN6_VF_VC_OVR 0x750c |
#define GEN6_VF_STR_PSKIP 0x7510 |
#define GEN6_VF_MAX_PRIM 0x7514 |
#define GEN6_VF_RDATA 0x7518 |
#define GEN6_VS_CTL 0x7600 |
#define GEN6_VS_CTL_SNAPSHOT_COMPLETE (1 << 31) |
#define GEN6_VS_CTL_SNAPSHOT_MUX_VERTEX_0 (0 << 8) |
#define GEN6_VS_CTL_SNAPSHOT_MUX_VERTEX_1 (1 << 8) |
#define GEN6_VS_CTL_SNAPSHOT_MUX_VALID_COUNT (2 << 8) |
#define GEN6_VS_CTL_SNAPSHOT_MUX_VS_KERNEL_POINTER (3 << 8) |
#define GEN6_VS_CTL_SNAPSHOT_ALL_THREADS (1 << 2) |
#define GEN6_VS_CTL_THREAD_SNAPSHOT_ENABLE (1 << 1) |
#define GEN6_VS_CTL_SNAPSHOT_ENABLE (1 << 0) |
#define GEN6_VS_STRG_VAL 0x7604 |
#define GEN6_VS_RDATA 0x7608 |
#define GEN6_SF_CTL 0x7b00 |
#define GEN6_SF_CTL_SNAPSHOT_COMPLETE (1 << 31) |
#define GEN6_SF_CTL_SNAPSHOT_MUX_VERTEX_0_FF_ID (0 << 8) |
#define GEN6_SF_CTL_SNAPSHOT_MUX_VERTEX_0_REL_COUNT (1 << 8) |
#define GEN6_SF_CTL_SNAPSHOT_MUX_VERTEX_1_FF_ID (2 << 8) |
#define GEN6_SF_CTL_SNAPSHOT_MUX_VERTEX_1_REL_COUNT (3 << 8) |
#define GEN6_SF_CTL_SNAPSHOT_MUX_VERTEX_2_FF_ID (4 << 8) |
#define GEN6_SF_CTL_SNAPSHOT_MUX_VERTEX_2_REL_COUNT (5 << 8) |
#define GEN6_SF_CTL_SNAPSHOT_MUX_VERTEX_COUNT (6 << 8) |
#define GEN6_SF_CTL_SNAPSHOT_MUX_SF_KERNEL_POINTER (7 << 8) |
#define GEN6_SF_CTL_MIN_MAX_PRIMITIVE_RANGE_ENABLE (1 << 4) |
#define GEN6_SF_CTL_DEBUG_CLIP_RECTANGLE_ENABLE (1 << 3) |
#define GEN6_SF_CTL_SNAPSHOT_ALL_THREADS (1 << 2) |
#define GEN6_SF_CTL_THREAD_SNAPSHOT_ENABLE (1 << 1) |
#define GEN6_SF_CTL_SNAPSHOT_ENABLE (1 << 0) |
#define GEN6_SF_STRG_VAL 0x7b04 |
#define GEN6_SF_RDATA 0x7b18 |
#define GEN6_WIZ_CTL 0x7c00 |
#define GEN6_WIZ_CTL_SNAPSHOT_COMPLETE (1 << 31) |
#define GEN6_WIZ_CTL_SUBSPAN_INSTANCE_SHIFT 16 |
#define GEN6_WIZ_CTL_SNAPSHOT_MUX_WIZ_KERNEL_POINTER (0 << 8) |
#define GEN6_WIZ_CTL_SNAPSHOT_MUX_SUBSPAN_INSTANCE (1 << 8) |
#define GEN6_WIZ_CTL_SNAPSHOT_MUX_PRIMITIVE_SEQUENCE (2 << 8) |
#define GEN6_WIZ_CTL_SINGLE_SUBSPAN_DISPATCH (1 << 6) |
#define GEN6_WIZ_CTL_IGNORE_COLOR_SCOREBOARD_STALLS (1 << 5) |
#define GEN6_WIZ_CTL_ENABLE_SUBSPAN_INSTANCE_COMPARE (1 << 4) |
#define GEN6_WIZ_CTL_USE_UPSTREAM_SNAPSHOT_FLAG (1 << 3) |
#define GEN6_WIZ_CTL_SNAPSHOT_ALL_THREADS (1 << 2) |
#define GEN6_WIZ_CTL_THREAD_SNAPSHOT_ENABLE (1 << 1) |
#define GEN6_WIZ_CTL_SNAPSHOT_ENABLE (1 << 0) |
#define GEN6_WIZ_STRG_VAL 0x7c04 |
#define GEN6_WIZ_RDATA 0x7c18 |
#define GEN6_TS_CTL 0x7e00 |
#define GEN6_TS_CTL_SNAPSHOT_COMPLETE (1 << 31) |
#define GEN6_TS_CTL_SNAPSHOT_MESSAGE_ERROR (0 << 8) |
#define GEN6_TS_CTL_SNAPSHOT_INTERFACE_DESCRIPTOR (3 << 8) |
#define GEN6_TS_CTL_SNAPSHOT_ALL_CHILD_THREADS (1 << 2) |
#define GEN6_TS_CTL_SNAPSHOT_ALL_ROOT_THREADS (1 << 1) |
#define GEN6_TS_CTL_SNAPSHOT_ENABLE (1 << 0) |
#define GEN6_TS_STRG_VAL 0x7e04 |
#define GEN6_TS_RDATA 0x7e08 |
#define GEN6_TD_CTL 0x8000 |
#define GEN6_TD_CTL_MUX_SHIFT 8 |
#define GEN6_TD_CTL_EXTERNAL_HALT_R0_DEBUG_MATCH (1 << 7) |
#define GEN6_TD_CTL_FORCE_EXTERNAL_HALT (1 << 6) |
#define GEN6_TD_CTL_EXCEPTION_MASK_OVERRIDE (1 << 5) |
#define GEN6_TD_CTL_FORCE_THREAD_BREAKPOINT_ENABLE (1 << 4) |
#define GEN6_TD_CTL_BREAKPOINT_ENABLE (1 << 2) |
#define GEN6_TD_CTL2 0x8004 |
#define GEN6_TD_CTL2_ILLEGAL_OPCODE_EXCEPTION_OVERRIDE (1 << 28) |
#define GEN6_TD_CTL2_MASKSTACK_EXCEPTION_OVERRIDE (1 << 26) |
#define GEN6_TD_CTL2_SOFTWARE_EXCEPTION_OVERRIDE (1 << 25) |
#define GEN6_TD_CTL2_ACTIVE_THREAD_LIMIT_SHIFT 16 |
#define GEN6_TD_CTL2_ACTIVE_THREAD_LIMIT_ENABLE (1 << 8) |
#define GEN6_TD_CTL2_THREAD_SPAWNER_EXECUTION_MASK_ENABLE (1 << 7) |
#define GEN6_TD_CTL2_WIZ_EXECUTION_MASK_ENABLE (1 << 6) |
#define GEN6_TD_CTL2_SF_EXECUTION_MASK_ENABLE (1 << 5) |
#define GEN6_TD_CTL2_CLIPPER_EXECUTION_MASK_ENABLE (1 << 4) |
#define GEN6_TD_CTL2_GS_EXECUTION_MASK_ENABLE (1 << 3) |
#define GEN6_TD_CTL2_VS_EXECUTION_MASK_ENABLE (1 << 0) |
#define GEN6_TD_VF_VS_EMSK 0x8008 |
#define GEN6_TD_GS_EMSK 0x800c |
#define GEN6_TD_CLIP_EMSK 0x8010 |
#define GEN6_TD_SF_EMSK 0x8014 |
#define GEN6_TD_WIZ_EMSK 0x8018 |
#define GEN6_TD_0_6_EHTRG_VAL 0x801c |
#define GEN6_TD_0_7_EHTRG_VAL 0x8020 |
#define GEN6_TD_0_6_EHTRG_MSK 0x8024 |
#define GEN6_TD_0_7_EHTRG_MSK 0x8028 |
#define GEN6_TD_RDATA 0x802c |
#define GEN6_TD_TS_EMSK 0x8030 |
#define GEN6_EU_CTL 0x8800 |
#define GEN6_EU_CTL_SELECT_SHIFT 16 |
#define GEN6_EU_CTL_DATA_MUX_SHIFT 8 |
#define GEN6_EU_ATT_0 0x8810 |
#define GEN6_EU_ATT_1 0x8814 |
#define GEN6_EU_ATT_DATA_0 0x8820 |
#define GEN6_EU_ATT_DATA_1 0x8824 |
#define GEN6_EU_ATT_CLR_0 0x8830 |
#define GEN6_EU_ATT_CLR_1 0x8834 |
#define GEN6_EU_RDATA 0x8840 |
#define GEN6_3D(Pipeline,Opcode,Subopcode) ((3 << 29) | \ |
((Pipeline) << 27) | \ |
((Opcode) << 24) | \ |
((Subopcode) << 16)) |
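/* |
 * Illustrative note (not part of the original header): GEN6_3D packs the |
 * common GFXPIPE command header -- bits 31:29 carry the command type (3), |
 * bits 28:27 the pipeline, bits 26:24 the opcode and bits 23:16 the |
 * sub-opcode.  For example, GEN6_PIPE_CONTROL below expands to |
 * |
 *   GEN6_3D(3, 2, 0) == (3 << 29) | (3 << 27) | (2 << 24) | (0 << 16) |
 *                    == 0x7a000000 |
 */ |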
#define GEN6_STATE_BASE_ADDRESS GEN6_3D(0, 1, 1) |
#define GEN6_STATE_SIP GEN6_3D(0, 1, 2) |
#define GEN6_PIPELINE_SELECT GEN6_3D(1, 1, 4) |
#define GEN6_MEDIA_STATE_POINTERS GEN6_3D(2, 0, 0) |
#define GEN6_MEDIA_OBJECT GEN6_3D(2, 1, 0) |
#define GEN6_3DSTATE_BINDING_TABLE_POINTERS GEN6_3D(3, 0, 1) |
# define GEN6_3DSTATE_BINDING_TABLE_MODIFY_PS (1 << 12)/* for GEN6 */ |
# define GEN6_3DSTATE_BINDING_TABLE_MODIFY_GS (1 << 9) /* for GEN6 */ |
# define GEN6_3DSTATE_BINDING_TABLE_MODIFY_VS (1 << 8) /* for GEN6 */ |
#define GEN6_3DSTATE_VERTEX_BUFFERS GEN6_3D(3, 0, 8) |
#define GEN6_3DSTATE_VERTEX_ELEMENTS GEN6_3D(3, 0, 9) |
#define GEN6_3DSTATE_INDEX_BUFFER GEN6_3D(3, 0, 0xa) |
#define GEN6_3DSTATE_VF_STATISTICS GEN6_3D(3, 0, 0xb) |
#define GEN6_3DSTATE_DRAWING_RECTANGLE GEN6_3D(3, 1, 0) |
#define GEN6_3DSTATE_CONSTANT_COLOR GEN6_3D(3, 1, 1) |
#define GEN6_3DSTATE_SAMPLER_PALETTE_LOAD GEN6_3D(3, 1, 2) |
#define GEN6_3DSTATE_CHROMA_KEY GEN6_3D(3, 1, 4) |
#define GEN6_3DSTATE_DEPTH_BUFFER GEN6_3D(3, 1, 5) |
# define GEN6_3DSTATE_DEPTH_BUFFER_TYPE_SHIFT 29 |
# define GEN6_3DSTATE_DEPTH_BUFFER_FORMAT_SHIFT 18 |
#define GEN6_3DSTATE_POLY_STIPPLE_OFFSET GEN6_3D(3, 1, 6) |
#define GEN6_3DSTATE_POLY_STIPPLE_PATTERN GEN6_3D(3, 1, 7) |
#define GEN6_3DSTATE_LINE_STIPPLE GEN6_3D(3, 1, 8) |
#define GEN6_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP GEN6_3D(3, 1, 9) |
/* These two are BLC and CTG only, not BW or CL */ |
#define GEN6_3DSTATE_AA_LINE_PARAMS GEN6_3D(3, 1, 0xa) |
#define GEN6_3DSTATE_GS_SVB_INDEX GEN6_3D(3, 1, 0xb) |
#define GEN6_3DPRIMITIVE GEN6_3D(3, 3, 0) |
#define GEN6_3DSTATE_CLEAR_PARAMS GEN6_3D(3, 1, 0x10) |
/* DW1 */ |
# define GEN6_3DSTATE_DEPTH_CLEAR_VALID (1 << 15) |
/* for GEN6+ */ |
#define GEN6_3DSTATE_SAMPLER_STATE_POINTERS GEN6_3D(3, 0, 0x02) |
# define GEN6_3DSTATE_SAMPLER_STATE_MODIFY_PS (1 << 12) |
# define GEN6_3DSTATE_SAMPLER_STATE_MODIFY_GS (1 << 9) |
# define GEN6_3DSTATE_SAMPLER_STATE_MODIFY_VS (1 << 8) |
#define GEN6_3DSTATE_URB GEN6_3D(3, 0, 0x05) |
/* DW1 */ |
# define GEN6_3DSTATE_URB_VS_SIZE_SHIFT 16 |
# define GEN6_3DSTATE_URB_VS_ENTRIES_SHIFT 0 |
/* DW2 */ |
# define GEN6_3DSTATE_URB_GS_ENTRIES_SHIFT 8 |
# define GEN6_3DSTATE_URB_GS_SIZE_SHIFT 0 |
#define GEN6_3DSTATE_VIEWPORT_STATE_POINTERS GEN6_3D(3, 0, 0x0d) |
# define GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CC (1 << 12) |
# define GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_SF (1 << 11) |
# define GEN6_3DSTATE_VIEWPORT_STATE_MODIFY_CLIP (1 << 10) |
#define GEN6_3DSTATE_CC_STATE_POINTERS GEN6_3D(3, 0, 0x0e) |
#define GEN6_3DSTATE_VS GEN6_3D(3, 0, 0x10) |
#define GEN6_3DSTATE_GS GEN6_3D(3, 0, 0x11) |
/* DW4 */ |
# define GEN6_3DSTATE_GS_DISPATCH_START_GRF_SHIFT 0 |
#define GEN6_3DSTATE_CLIP GEN6_3D(3, 0, 0x12) |
#define GEN6_3DSTATE_SF GEN6_3D(3, 0, 0x13) |
/* DW1 */ |
# define GEN6_3DSTATE_SF_NUM_OUTPUTS_SHIFT 22 |
# define GEN6_3DSTATE_SF_URB_ENTRY_READ_LENGTH_SHIFT 11 |
# define GEN6_3DSTATE_SF_URB_ENTRY_READ_OFFSET_SHIFT 4 |
/* DW2 */ |
/* DW3 */ |
# define GEN6_3DSTATE_SF_CULL_BOTH (0 << 29) |
# define GEN6_3DSTATE_SF_CULL_NONE (1 << 29) |
# define GEN6_3DSTATE_SF_CULL_FRONT (2 << 29) |
# define GEN6_3DSTATE_SF_CULL_BACK (3 << 29) |
/* DW4 */ |
# define GEN6_3DSTATE_SF_TRI_PROVOKE_SHIFT 29 |
# define GEN6_3DSTATE_SF_LINE_PROVOKE_SHIFT 27 |
# define GEN6_3DSTATE_SF_TRIFAN_PROVOKE_SHIFT 25 |
#define GEN6_3DSTATE_WM GEN6_3D(3, 0, 0x14) |
/* DW2 */ |
# define GEN6_3DSTATE_WM_SAMPLER_COUNT_SHITF 27 |
# define GEN6_3DSTATE_WM_BINDING_TABLE_ENTRY_COUNT_SHIFT 18 |
/* DW4 */ |
# define GEN6_3DSTATE_WM_DISPATCH_START_GRF_0_SHIFT 16 |
/* DW5 */ |
# define GEN6_3DSTATE_WM_MAX_THREADS_SHIFT 25 |
# define GEN6_3DSTATE_WM_DISPATCH_ENABLE (1 << 19) |
# define GEN6_3DSTATE_WM_16_DISPATCH_ENABLE (1 << 1) |
# define GEN6_3DSTATE_WM_8_DISPATCH_ENABLE (1 << 0) |
/* DW6 */ |
# define GEN6_3DSTATE_WM_NUM_SF_OUTPUTS_SHIFT 20 |
# define GEN6_3DSTATE_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC (1 << 15) |
# define GEN6_3DSTATE_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC (1 << 14) |
# define GEN6_3DSTATE_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC (1 << 13) |
# define GEN6_3DSTATE_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC (1 << 12) |
# define GEN6_3DSTATE_WM_PERSPECTIVE_CENTROID_BARYCENTRIC (1 << 11) |
# define GEN6_3DSTATE_WM_PERSPECTIVE_PIXEL_BARYCENTRIC (1 << 10) |
#define GEN6_3DSTATE_CONSTANT_VS GEN6_3D(3, 0, 0x15) |
#define GEN6_3DSTATE_CONSTANT_GS GEN6_3D(3, 0, 0x16) |
#define GEN6_3DSTATE_CONSTANT_PS GEN6_3D(3, 0, 0x17) |
#define GEN6_3DSTATE_SAMPLE_MASK GEN6_3D(3, 0, 0x18) |
#define GEN6_3DSTATE_MULTISAMPLE GEN6_3D(3, 1, 0x0d) |
/* DW1 */ |
# define GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_CENTER (0 << 4) |
# define GEN6_3DSTATE_MULTISAMPLE_PIXEL_LOCATION_UPPER_LEFT (1 << 4) |
# define GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_1 (0 << 1) |
# define GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_4 (2 << 1) |
# define GEN6_3DSTATE_MULTISAMPLE_NUMSAMPLES_8 (3 << 1) |
#define PIPELINE_SELECT_3D 0 |
#define PIPELINE_SELECT_MEDIA 1 |
#define UF0_CS_REALLOC (1 << 13) |
#define UF0_VFE_REALLOC (1 << 12) |
#define UF0_SF_REALLOC (1 << 11) |
#define UF0_CLIP_REALLOC (1 << 10) |
#define UF0_GS_REALLOC (1 << 9) |
#define UF0_VS_REALLOC (1 << 8) |
#define UF1_CLIP_FENCE_SHIFT 20 |
#define UF1_GS_FENCE_SHIFT 10 |
#define UF1_VS_FENCE_SHIFT 0 |
#define UF2_CS_FENCE_SHIFT 20 |
#define UF2_VFE_FENCE_SHIFT 10 |
#define UF2_SF_FENCE_SHIFT 0 |
/* for GEN6_STATE_BASE_ADDRESS */ |
#define BASE_ADDRESS_MODIFY (1 << 0) |
/* for GEN6_3DSTATE_PIPELINED_POINTERS */ |
#define GEN6_GS_DISABLE 0 |
#define GEN6_GS_ENABLE 1 |
#define GEN6_CLIP_DISABLE 0 |
#define GEN6_CLIP_ENABLE 1 |
/* for GEN6_PIPE_CONTROL */ |
#define GEN6_PIPE_CONTROL GEN6_3D(3, 2, 0) |
#define GEN6_PIPE_CONTROL_CS_STALL (1 << 20) |
#define GEN6_PIPE_CONTROL_NOWRITE (0 << 14) |
#define GEN6_PIPE_CONTROL_WRITE_QWORD (1 << 14) |
#define GEN6_PIPE_CONTROL_WRITE_DEPTH (2 << 14) |
#define GEN6_PIPE_CONTROL_WRITE_TIME (3 << 14) |
#define GEN6_PIPE_CONTROL_DEPTH_STALL (1 << 13) |
#define GEN6_PIPE_CONTROL_WC_FLUSH (1 << 12) |
#define GEN6_PIPE_CONTROL_IS_FLUSH (1 << 11) |
#define GEN6_PIPE_CONTROL_TC_FLUSH (1 << 10) |
#define GEN6_PIPE_CONTROL_NOTIFY_ENABLE (1 << 8) |
#define GEN6_PIPE_CONTROL_GLOBAL_GTT (1 << 2) |
#define GEN6_PIPE_CONTROL_LOCAL_PGTT (0 << 2) |
#define GEN6_PIPE_CONTROL_STALL_AT_SCOREBOARD (1 << 1) |
#define GEN6_PIPE_CONTROL_DEPTH_CACHE_FLUSH (1 << 0) |
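/* |
 * Illustrative usage sketch (an assumption, not original source): a gen6 |
 * render-cache flush is typically emitted as a 4-dword PIPE_CONTROL, with |
 * the length field biased by two; OUT_BATCH here stands for any |
 * hypothetical dword-emit helper: |
 * |
 *   OUT_BATCH(GEN6_PIPE_CONTROL | (4 - 2)); |
 *   OUT_BATCH(GEN6_PIPE_CONTROL_WC_FLUSH | |
 *             GEN6_PIPE_CONTROL_TC_FLUSH | |
 *             GEN6_PIPE_CONTROL_CS_STALL); |
 *   OUT_BATCH(0);   -- no post-sync write address (NOWRITE) |
 *   OUT_BATCH(0);   -- immediate data, unused |
 */ |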
/* 3DPRIMITIVE bits */ |
#define GEN6_3DPRIMITIVE_VERTEX_SEQUENTIAL (0 << 15) |
#define GEN6_3DPRIMITIVE_VERTEX_RANDOM (1 << 15) |
/* Primitive types are in gen6_defines.h */ |
/* 3D state: |
*/ |
#define _3DOP_3DSTATE_PIPELINED 0x0 |
#define _3DOP_3DSTATE_NONPIPELINED 0x1 |
#define _3DOP_3DCONTROL 0x2 |
#define _3DOP_3DPRIMITIVE 0x3 |
#define _3DSTATE_PIPELINED_POINTERS 0x00 |
#define _3DSTATE_BINDING_TABLE_POINTERS 0x01 |
#define _3DSTATE_VERTEX_BUFFERS 0x08 |
#define _3DSTATE_VERTEX_ELEMENTS 0x09 |
#define _3DSTATE_INDEX_BUFFER 0x0A |
#define _3DSTATE_VF_STATISTICS 0x0B |
#define _3DSTATE_DRAWING_RECTANGLE 0x00 |
#define _3DSTATE_CONSTANT_COLOR 0x01 |
#define _3DSTATE_SAMPLER_PALETTE_LOAD 0x02 |
#define _3DSTATE_CHROMA_KEY 0x04 |
#define _3DSTATE_DEPTH_BUFFER 0x05 |
#define _3DSTATE_POLY_STIPPLE_OFFSET 0x06 |
#define _3DSTATE_POLY_STIPPLE_PATTERN 0x07 |
#define _3DSTATE_LINE_STIPPLE 0x08 |
#define _3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP 0x09 |
#define _3DCONTROL 0x00 |
#define _3DPRIMITIVE 0x00 |
#define _3DPRIM_POINTLIST 0x01 |
#define _3DPRIM_LINELIST 0x02 |
#define _3DPRIM_LINESTRIP 0x03 |
#define _3DPRIM_TRILIST 0x04 |
#define _3DPRIM_TRISTRIP 0x05 |
#define _3DPRIM_TRIFAN 0x06 |
#define _3DPRIM_QUADLIST 0x07 |
#define _3DPRIM_QUADSTRIP 0x08 |
#define _3DPRIM_LINELIST_ADJ 0x09 |
#define _3DPRIM_LINESTRIP_ADJ 0x0A |
#define _3DPRIM_TRILIST_ADJ 0x0B |
#define _3DPRIM_TRISTRIP_ADJ 0x0C |
#define _3DPRIM_TRISTRIP_REVERSE 0x0D |
#define _3DPRIM_POLYGON 0x0E |
#define _3DPRIM_RECTLIST 0x0F |
#define _3DPRIM_LINELOOP 0x10 |
#define _3DPRIM_POINTLIST_BF 0x11 |
#define _3DPRIM_LINESTRIP_CONT 0x12 |
#define _3DPRIM_LINESTRIP_BF 0x13 |
#define _3DPRIM_LINESTRIP_CONT_BF 0x14 |
#define _3DPRIM_TRIFAN_NOSTIPPLE 0x15 |
#define _3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL 0 |
#define _3DPRIM_VERTEXBUFFER_ACCESS_RANDOM 1 |
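/* |
 * Illustrative usage sketch (an assumption, not original source): a gen6 |
 * 3DPRIMITIVE for a rectangle list is six dwords -- the header carries the |
 * topology and the sequential-access bit, the body the vertex counts |
 * (OUT_BATCH again stands for any dword-emit helper): |
 * |
 *   OUT_BATCH(GEN6_3DPRIMITIVE | |
 *             GEN6_3DPRIMITIVE_VERTEX_SEQUENTIAL | |
 *             _3DPRIM_RECTLIST << GEN6_3DPRIMITIVE_TOPOLOGY_SHIFT | |
 *             (6 - 2)); |
 *   OUT_BATCH(vertex_count); |
 *   OUT_BATCH(start_vertex); |
 *   OUT_BATCH(1);   -- one instance |
 *   OUT_BATCH(0);   -- start instance |
 *   OUT_BATCH(0);   -- base vertex, ignored here |
 */ |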
#define GEN6_ANISORATIO_2 0 |
#define GEN6_ANISORATIO_4 1 |
#define GEN6_ANISORATIO_6 2 |
#define GEN6_ANISORATIO_8 3 |
#define GEN6_ANISORATIO_10 4 |
#define GEN6_ANISORATIO_12 5 |
#define GEN6_ANISORATIO_14 6 |
#define GEN6_ANISORATIO_16 7 |
#define GEN6_BLENDFACTOR_ONE 0x1 |
#define GEN6_BLENDFACTOR_SRC_COLOR 0x2 |
#define GEN6_BLENDFACTOR_SRC_ALPHA 0x3 |
#define GEN6_BLENDFACTOR_DST_ALPHA 0x4 |
#define GEN6_BLENDFACTOR_DST_COLOR 0x5 |
#define GEN6_BLENDFACTOR_SRC_ALPHA_SATURATE 0x6 |
#define GEN6_BLENDFACTOR_CONST_COLOR 0x7 |
#define GEN6_BLENDFACTOR_CONST_ALPHA 0x8 |
#define GEN6_BLENDFACTOR_SRC1_COLOR 0x9 |
#define GEN6_BLENDFACTOR_SRC1_ALPHA 0x0A |
#define GEN6_BLENDFACTOR_ZERO 0x11 |
#define GEN6_BLENDFACTOR_INV_SRC_COLOR 0x12 |
#define GEN6_BLENDFACTOR_INV_SRC_ALPHA 0x13 |
#define GEN6_BLENDFACTOR_INV_DST_ALPHA 0x14 |
#define GEN6_BLENDFACTOR_INV_DST_COLOR 0x15 |
#define GEN6_BLENDFACTOR_INV_CONST_COLOR 0x17 |
#define GEN6_BLENDFACTOR_INV_CONST_ALPHA 0x18 |
#define GEN6_BLENDFACTOR_INV_SRC1_COLOR 0x19 |
#define GEN6_BLENDFACTOR_INV_SRC1_ALPHA 0x1A |
#define GEN6_BLENDFUNCTION_ADD 0 |
#define GEN6_BLENDFUNCTION_SUBTRACT 1 |
#define GEN6_BLENDFUNCTION_REVERSE_SUBTRACT 2 |
#define GEN6_BLENDFUNCTION_MIN 3 |
#define GEN6_BLENDFUNCTION_MAX 4 |
#define GEN6_ALPHATEST_FORMAT_UNORM8 0 |
#define GEN6_ALPHATEST_FORMAT_FLOAT32 1 |
#define GEN6_CHROMAKEY_KILL_ON_ANY_MATCH 0 |
#define GEN6_CHROMAKEY_REPLACE_BLACK 1 |
#define GEN6_CLIP_API_OGL 0 |
#define GEN6_CLIP_API_DX 1 |
#define GEN6_CLIPMODE_NORMAL 0 |
#define GEN6_CLIPMODE_CLIP_ALL 1 |
#define GEN6_CLIPMODE_CLIP_NON_REJECTED 2 |
#define GEN6_CLIPMODE_REJECT_ALL 3 |
#define GEN6_CLIPMODE_ACCEPT_ALL 4 |
#define GEN6_CLIP_NDCSPACE 0 |
#define GEN6_CLIP_SCREENSPACE 1 |
#define GEN6_COMPAREFUNCTION_ALWAYS 0 |
#define GEN6_COMPAREFUNCTION_NEVER 1 |
#define GEN6_COMPAREFUNCTION_LESS 2 |
#define GEN6_COMPAREFUNCTION_EQUAL 3 |
#define GEN6_COMPAREFUNCTION_LEQUAL 4 |
#define GEN6_COMPAREFUNCTION_GREATER 5 |
#define GEN6_COMPAREFUNCTION_NOTEQUAL 6 |
#define GEN6_COMPAREFUNCTION_GEQUAL 7 |
#define GEN6_COVERAGE_PIXELS_HALF 0 |
#define GEN6_COVERAGE_PIXELS_1 1 |
#define GEN6_COVERAGE_PIXELS_2 2 |
#define GEN6_COVERAGE_PIXELS_4 3 |
#define GEN6_CULLMODE_BOTH 0 |
#define GEN6_CULLMODE_NONE 1 |
#define GEN6_CULLMODE_FRONT 2 |
#define GEN6_CULLMODE_BACK 3 |
#define GEN6_DEFAULTCOLOR_R8G8B8A8_UNORM 0 |
#define GEN6_DEFAULTCOLOR_R32G32B32A32_FLOAT 1 |
#define GEN6_DEPTHFORMAT_D32_FLOAT_S8X24_UINT 0 |
#define GEN6_DEPTHFORMAT_D32_FLOAT 1 |
#define GEN6_DEPTHFORMAT_D24_UNORM_S8_UINT 2 |
#define GEN6_DEPTHFORMAT_D16_UNORM 5 |
#define GEN6_FLOATING_POINT_IEEE_754 0 |
#define GEN6_FLOATING_POINT_NON_IEEE_754 1 |
#define GEN6_FRONTWINDING_CW 0 |
#define GEN6_FRONTWINDING_CCW 1 |
#define GEN6_INDEX_BYTE 0 |
#define GEN6_INDEX_WORD 1 |
#define GEN6_INDEX_DWORD 2 |
#define GEN6_LOGICOPFUNCTION_CLEAR 0 |
#define GEN6_LOGICOPFUNCTION_NOR 1 |
#define GEN6_LOGICOPFUNCTION_AND_INVERTED 2 |
#define GEN6_LOGICOPFUNCTION_COPY_INVERTED 3 |
#define GEN6_LOGICOPFUNCTION_AND_REVERSE 4 |
#define GEN6_LOGICOPFUNCTION_INVERT 5 |
#define GEN6_LOGICOPFUNCTION_XOR 6 |
#define GEN6_LOGICOPFUNCTION_NAND 7 |
#define GEN6_LOGICOPFUNCTION_AND 8 |
#define GEN6_LOGICOPFUNCTION_EQUIV 9 |
#define GEN6_LOGICOPFUNCTION_NOOP 10 |
#define GEN6_LOGICOPFUNCTION_OR_INVERTED 11 |
#define GEN6_LOGICOPFUNCTION_COPY 12 |
#define GEN6_LOGICOPFUNCTION_OR_REVERSE 13 |
#define GEN6_LOGICOPFUNCTION_OR 14 |
#define GEN6_LOGICOPFUNCTION_SET 15 |
#define GEN6_MAPFILTER_NEAREST 0x0 |
#define GEN6_MAPFILTER_LINEAR 0x1 |
#define GEN6_MAPFILTER_ANISOTROPIC 0x2 |
#define GEN6_MIPFILTER_NONE 0 |
#define GEN6_MIPFILTER_NEAREST 1 |
#define GEN6_MIPFILTER_LINEAR 3 |
#define GEN6_POLYGON_FRONT_FACING 0 |
#define GEN6_POLYGON_BACK_FACING 1 |
#define GEN6_PREFILTER_ALWAYS 0x0 |
#define GEN6_PREFILTER_NEVER 0x1 |
#define GEN6_PREFILTER_LESS 0x2 |
#define GEN6_PREFILTER_EQUAL 0x3 |
#define GEN6_PREFILTER_LEQUAL 0x4 |
#define GEN6_PREFILTER_GREATER 0x5 |
#define GEN6_PREFILTER_NOTEQUAL 0x6 |
#define GEN6_PREFILTER_GEQUAL 0x7 |
#define GEN6_PROVOKING_VERTEX_0 0 |
#define GEN6_PROVOKING_VERTEX_1 1 |
#define GEN6_PROVOKING_VERTEX_2 2 |
#define GEN6_RASTRULE_UPPER_LEFT 0 |
#define GEN6_RASTRULE_UPPER_RIGHT 1 |
#define GEN6_RENDERTARGET_CLAMPRANGE_UNORM 0 |
#define GEN6_RENDERTARGET_CLAMPRANGE_SNORM 1 |
#define GEN6_RENDERTARGET_CLAMPRANGE_FORMAT 2 |
#define GEN6_STENCILOP_KEEP 0 |
#define GEN6_STENCILOP_ZERO 1 |
#define GEN6_STENCILOP_REPLACE 2 |
#define GEN6_STENCILOP_INCRSAT 3 |
#define GEN6_STENCILOP_DECRSAT 4 |
#define GEN6_STENCILOP_INCR 5 |
#define GEN6_STENCILOP_DECR 6 |
#define GEN6_STENCILOP_INVERT 7 |
#define GEN6_SURFACE_MIPMAPLAYOUT_BELOW 0 |
#define GEN6_SURFACE_MIPMAPLAYOUT_RIGHT 1 |
#define GEN6_SURFACEFORMAT_R32G32B32A32_FLOAT 0x000 |
#define GEN6_SURFACEFORMAT_R32G32B32A32_SINT 0x001 |
#define GEN6_SURFACEFORMAT_R32G32B32A32_UINT 0x002 |
#define GEN6_SURFACEFORMAT_R32G32B32A32_UNORM 0x003 |
#define GEN6_SURFACEFORMAT_R32G32B32A32_SNORM 0x004 |
#define GEN6_SURFACEFORMAT_R64G64_FLOAT 0x005 |
#define GEN6_SURFACEFORMAT_R32G32B32X32_FLOAT 0x006 |
#define GEN6_SURFACEFORMAT_R32G32B32A32_SSCALED 0x007 |
#define GEN6_SURFACEFORMAT_R32G32B32A32_USCALED 0x008 |
#define GEN6_SURFACEFORMAT_R32G32B32_FLOAT 0x040 |
#define GEN6_SURFACEFORMAT_R32G32B32_SINT 0x041 |
#define GEN6_SURFACEFORMAT_R32G32B32_UINT 0x042 |
#define GEN6_SURFACEFORMAT_R32G32B32_UNORM 0x043 |
#define GEN6_SURFACEFORMAT_R32G32B32_SNORM 0x044 |
#define GEN6_SURFACEFORMAT_R32G32B32_SSCALED 0x045 |
#define GEN6_SURFACEFORMAT_R32G32B32_USCALED 0x046 |
#define GEN6_SURFACEFORMAT_R16G16B16A16_UNORM 0x080 |
#define GEN6_SURFACEFORMAT_R16G16B16A16_SNORM 0x081 |
#define GEN6_SURFACEFORMAT_R16G16B16A16_SINT 0x082 |
#define GEN6_SURFACEFORMAT_R16G16B16A16_UINT 0x083 |
#define GEN6_SURFACEFORMAT_R16G16B16A16_FLOAT 0x084 |
#define GEN6_SURFACEFORMAT_R32G32_FLOAT 0x085 |
#define GEN6_SURFACEFORMAT_R32G32_SINT 0x086 |
#define GEN6_SURFACEFORMAT_R32G32_UINT 0x087 |
#define GEN6_SURFACEFORMAT_R32_FLOAT_X8X24_TYPELESS 0x088 |
#define GEN6_SURFACEFORMAT_X32_TYPELESS_G8X24_UINT 0x089 |
#define GEN6_SURFACEFORMAT_L32A32_FLOAT 0x08A |
#define GEN6_SURFACEFORMAT_R32G32_UNORM 0x08B |
#define GEN6_SURFACEFORMAT_R32G32_SNORM 0x08C |
#define GEN6_SURFACEFORMAT_R64_FLOAT 0x08D |
#define GEN6_SURFACEFORMAT_R16G16B16X16_UNORM 0x08E |
#define GEN6_SURFACEFORMAT_R16G16B16X16_FLOAT 0x08F |
#define GEN6_SURFACEFORMAT_A32X32_FLOAT 0x090 |
#define GEN6_SURFACEFORMAT_L32X32_FLOAT 0x091 |
#define GEN6_SURFACEFORMAT_I32X32_FLOAT 0x092 |
#define GEN6_SURFACEFORMAT_R16G16B16A16_SSCALED 0x093 |
#define GEN6_SURFACEFORMAT_R16G16B16A16_USCALED 0x094 |
#define GEN6_SURFACEFORMAT_R32G32_SSCALED 0x095 |
#define GEN6_SURFACEFORMAT_R32G32_USCALED 0x096 |
#define GEN6_SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0 |
#define GEN6_SURFACEFORMAT_B8G8R8A8_UNORM_SRGB 0x0C1 |
#define GEN6_SURFACEFORMAT_R10G10B10A2_UNORM 0x0C2 |
#define GEN6_SURFACEFORMAT_R10G10B10A2_UNORM_SRGB 0x0C3 |
#define GEN6_SURFACEFORMAT_R10G10B10A2_UINT 0x0C4 |
#define GEN6_SURFACEFORMAT_R10G10B10_SNORM_A2_UNORM 0x0C5 |
#define GEN6_SURFACEFORMAT_R8G8B8A8_UNORM 0x0C7 |
#define GEN6_SURFACEFORMAT_R8G8B8A8_UNORM_SRGB 0x0C8 |
#define GEN6_SURFACEFORMAT_R8G8B8A8_SNORM 0x0C9 |
#define GEN6_SURFACEFORMAT_R8G8B8A8_SINT 0x0CA |
#define GEN6_SURFACEFORMAT_R8G8B8A8_UINT 0x0CB |
#define GEN6_SURFACEFORMAT_R16G16_UNORM 0x0CC |
#define GEN6_SURFACEFORMAT_R16G16_SNORM 0x0CD |
#define GEN6_SURFACEFORMAT_R16G16_SINT 0x0CE |
#define GEN6_SURFACEFORMAT_R16G16_UINT 0x0CF |
#define GEN6_SURFACEFORMAT_R16G16_FLOAT 0x0D0 |
#define GEN6_SURFACEFORMAT_B10G10R10A2_UNORM 0x0D1 |
#define GEN6_SURFACEFORMAT_B10G10R10A2_UNORM_SRGB 0x0D2 |
#define GEN6_SURFACEFORMAT_R11G11B10_FLOAT 0x0D3 |
#define GEN6_SURFACEFORMAT_R32_SINT 0x0D6 |
#define GEN6_SURFACEFORMAT_R32_UINT 0x0D7 |
#define GEN6_SURFACEFORMAT_R32_FLOAT 0x0D8 |
#define GEN6_SURFACEFORMAT_R24_UNORM_X8_TYPELESS 0x0D9 |
#define GEN6_SURFACEFORMAT_X24_TYPELESS_G8_UINT 0x0DA |
#define GEN6_SURFACEFORMAT_L16A16_UNORM 0x0DF |
#define GEN6_SURFACEFORMAT_I24X8_UNORM 0x0E0 |
#define GEN6_SURFACEFORMAT_L24X8_UNORM 0x0E1 |
#define GEN6_SURFACEFORMAT_A24X8_UNORM 0x0E2 |
#define GEN6_SURFACEFORMAT_I32_FLOAT 0x0E3 |
#define GEN6_SURFACEFORMAT_L32_FLOAT 0x0E4 |
#define GEN6_SURFACEFORMAT_A32_FLOAT 0x0E5 |
#define GEN6_SURFACEFORMAT_B8G8R8X8_UNORM 0x0E9 |
#define GEN6_SURFACEFORMAT_B8G8R8X8_UNORM_SRGB 0x0EA |
#define GEN6_SURFACEFORMAT_R8G8B8X8_UNORM 0x0EB |
#define GEN6_SURFACEFORMAT_R8G8B8X8_UNORM_SRGB 0x0EC |
#define GEN6_SURFACEFORMAT_R9G9B9E5_SHAREDEXP 0x0ED |
#define GEN6_SURFACEFORMAT_B10G10R10X2_UNORM 0x0EE |
#define GEN6_SURFACEFORMAT_L16A16_FLOAT 0x0F0 |
#define GEN6_SURFACEFORMAT_R32_UNORM 0x0F1 |
#define GEN6_SURFACEFORMAT_R32_SNORM 0x0F2 |
#define GEN6_SURFACEFORMAT_R10G10B10X2_USCALED 0x0F3 |
#define GEN6_SURFACEFORMAT_R8G8B8A8_SSCALED 0x0F4 |
#define GEN6_SURFACEFORMAT_R8G8B8A8_USCALED 0x0F5 |
#define GEN6_SURFACEFORMAT_R16G16_SSCALED 0x0F6 |
#define GEN6_SURFACEFORMAT_R16G16_USCALED 0x0F7 |
#define GEN6_SURFACEFORMAT_R32_SSCALED 0x0F8 |
#define GEN6_SURFACEFORMAT_R32_USCALED 0x0F9 |
#define GEN6_SURFACEFORMAT_B5G6R5_UNORM 0x100 |
#define GEN6_SURFACEFORMAT_B5G6R5_UNORM_SRGB 0x101 |
#define GEN6_SURFACEFORMAT_B5G5R5A1_UNORM 0x102 |
#define GEN6_SURFACEFORMAT_B5G5R5A1_UNORM_SRGB 0x103 |
#define GEN6_SURFACEFORMAT_B4G4R4A4_UNORM 0x104 |
#define GEN6_SURFACEFORMAT_B4G4R4A4_UNORM_SRGB 0x105 |
#define GEN6_SURFACEFORMAT_R8G8_UNORM 0x106 |
#define GEN6_SURFACEFORMAT_R8G8_SNORM 0x107 |
#define GEN6_SURFACEFORMAT_R8G8_SINT 0x108 |
#define GEN6_SURFACEFORMAT_R8G8_UINT 0x109 |
#define GEN6_SURFACEFORMAT_R16_UNORM 0x10A |
#define GEN6_SURFACEFORMAT_R16_SNORM 0x10B |
#define GEN6_SURFACEFORMAT_R16_SINT 0x10C |
#define GEN6_SURFACEFORMAT_R16_UINT 0x10D |
#define GEN6_SURFACEFORMAT_R16_FLOAT 0x10E |
#define GEN6_SURFACEFORMAT_I16_UNORM 0x111 |
#define GEN6_SURFACEFORMAT_L16_UNORM 0x112 |
#define GEN6_SURFACEFORMAT_A16_UNORM 0x113 |
#define GEN6_SURFACEFORMAT_L8A8_UNORM 0x114 |
#define GEN6_SURFACEFORMAT_I16_FLOAT 0x115 |
#define GEN6_SURFACEFORMAT_L16_FLOAT 0x116 |
#define GEN6_SURFACEFORMAT_A16_FLOAT 0x117 |
#define GEN6_SURFACEFORMAT_R5G5_SNORM_B6_UNORM 0x119 |
#define GEN6_SURFACEFORMAT_B5G5R5X1_UNORM 0x11A |
#define GEN6_SURFACEFORMAT_B5G5R5X1_UNORM_SRGB 0x11B |
#define GEN6_SURFACEFORMAT_R8G8_SSCALED 0x11C |
#define GEN6_SURFACEFORMAT_R8G8_USCALED 0x11D |
#define GEN6_SURFACEFORMAT_R16_SSCALED 0x11E |
#define GEN6_SURFACEFORMAT_R16_USCALED 0x11F |
#define GEN6_SURFACEFORMAT_R8_UNORM 0x140 |
#define GEN6_SURFACEFORMAT_R8_SNORM 0x141 |
#define GEN6_SURFACEFORMAT_R8_SINT 0x142 |
#define GEN6_SURFACEFORMAT_R8_UINT 0x143 |
#define GEN6_SURFACEFORMAT_A8_UNORM 0x144 |
#define GEN6_SURFACEFORMAT_I8_UNORM 0x145 |
#define GEN6_SURFACEFORMAT_L8_UNORM 0x146 |
#define GEN6_SURFACEFORMAT_P4A4_UNORM 0x147 |
#define GEN6_SURFACEFORMAT_A4P4_UNORM 0x148 |
#define GEN6_SURFACEFORMAT_R8_SSCALED 0x149 |
#define GEN6_SURFACEFORMAT_R8_USCALED 0x14A |
#define GEN6_SURFACEFORMAT_R1_UINT 0x181 |
#define GEN6_SURFACEFORMAT_YCRCB_NORMAL 0x182 |
#define GEN6_SURFACEFORMAT_YCRCB_SWAPUVY 0x183 |
#define GEN6_SURFACEFORMAT_BC1_UNORM 0x186 |
#define GEN6_SURFACEFORMAT_BC2_UNORM 0x187 |
#define GEN6_SURFACEFORMAT_BC3_UNORM 0x188 |
#define GEN6_SURFACEFORMAT_BC4_UNORM 0x189 |
#define GEN6_SURFACEFORMAT_BC5_UNORM 0x18A |
#define GEN6_SURFACEFORMAT_BC1_UNORM_SRGB 0x18B |
#define GEN6_SURFACEFORMAT_BC2_UNORM_SRGB 0x18C |
#define GEN6_SURFACEFORMAT_BC3_UNORM_SRGB 0x18D |
#define GEN6_SURFACEFORMAT_MONO8 0x18E |
#define GEN6_SURFACEFORMAT_YCRCB_SWAPUV 0x18F |
#define GEN6_SURFACEFORMAT_YCRCB_SWAPY 0x190 |
#define GEN6_SURFACEFORMAT_DXT1_RGB 0x191 |
#define GEN6_SURFACEFORMAT_FXT1 0x192 |
#define GEN6_SURFACEFORMAT_R8G8B8_UNORM 0x193 |
#define GEN6_SURFACEFORMAT_R8G8B8_SNORM 0x194 |
#define GEN6_SURFACEFORMAT_R8G8B8_SSCALED 0x195 |
#define GEN6_SURFACEFORMAT_R8G8B8_USCALED 0x196 |
#define GEN6_SURFACEFORMAT_R64G64B64A64_FLOAT 0x197 |
#define GEN6_SURFACEFORMAT_R64G64B64_FLOAT 0x198 |
#define GEN6_SURFACEFORMAT_BC4_SNORM 0x199 |
#define GEN6_SURFACEFORMAT_BC5_SNORM 0x19A |
#define GEN6_SURFACEFORMAT_R16G16B16_UNORM 0x19C |
#define GEN6_SURFACEFORMAT_R16G16B16_SNORM 0x19D |
#define GEN6_SURFACEFORMAT_R16G16B16_SSCALED 0x19E |
#define GEN6_SURFACEFORMAT_R16G16B16_USCALED 0x19F |
#define GEN6_SURFACERETURNFORMAT_FLOAT32 0 |
#define GEN6_SURFACERETURNFORMAT_S1 1 |
#define GEN6_SURFACE_1D 0 |
#define GEN6_SURFACE_2D 1 |
#define GEN6_SURFACE_3D 2 |
#define GEN6_SURFACE_CUBE 3 |
#define GEN6_SURFACE_BUFFER 4 |
#define GEN6_SURFACE_NULL 7 |
#define GEN6_BORDER_COLOR_MODE_DEFAULT 0 |
#define GEN6_BORDER_COLOR_MODE_LEGACY 1 |
#define GEN6_TEXCOORDMODE_WRAP 0 |
#define GEN6_TEXCOORDMODE_MIRROR 1 |
#define GEN6_TEXCOORDMODE_CLAMP 2 |
#define GEN6_TEXCOORDMODE_CUBE 3 |
#define GEN6_TEXCOORDMODE_CLAMP_BORDER 4 |
#define GEN6_TEXCOORDMODE_MIRROR_ONCE 5 |
#define GEN6_THREAD_PRIORITY_NORMAL 0 |
#define GEN6_THREAD_PRIORITY_HIGH 1 |
#define GEN6_TILEWALK_XMAJOR 0 |
#define GEN6_TILEWALK_YMAJOR 1 |
#define GEN6_VERTEX_SUBPIXEL_PRECISION_8BITS 0 |
#define GEN6_VERTEX_SUBPIXEL_PRECISION_4BITS 1 |
#define GEN6_VERTEXBUFFER_ACCESS_VERTEXDATA 0 |
#define GEN6_VERTEXBUFFER_ACCESS_INSTANCEDATA 1 |
#define GEN6_VFCOMPONENT_NOSTORE 0 |
#define GEN6_VFCOMPONENT_STORE_SRC 1 |
#define GEN6_VFCOMPONENT_STORE_0 2 |
#define GEN6_VFCOMPONENT_STORE_1_FLT 3 |
#define GEN6_VFCOMPONENT_STORE_1_INT 4 |
#define GEN6_VFCOMPONENT_STORE_VID 5 |
#define GEN6_VFCOMPONENT_STORE_IID 6 |
#define GEN6_VFCOMPONENT_STORE_PID 7 |
/* Execution Unit (EU) defines |
*/ |
#define GEN6_ALIGN_1 0 |
#define GEN6_ALIGN_16 1 |
#define GEN6_ADDRESS_DIRECT 0 |
#define GEN6_ADDRESS_REGISTER_INDIRECT_REGISTER 1 |
#define GEN6_CHANNEL_X 0 |
#define GEN6_CHANNEL_Y 1 |
#define GEN6_CHANNEL_Z 2 |
#define GEN6_CHANNEL_W 3 |
#define GEN6_COMPRESSION_NONE 0 |
#define GEN6_COMPRESSION_2NDHALF 1 |
#define GEN6_COMPRESSION_COMPRESSED 2 |
#define GEN6_CONDITIONAL_NONE 0 |
#define GEN6_CONDITIONAL_Z 1 |
#define GEN6_CONDITIONAL_NZ 2 |
#define GEN6_CONDITIONAL_EQ 1 /* Z */ |
#define GEN6_CONDITIONAL_NEQ 2 /* NZ */ |
#define GEN6_CONDITIONAL_G 3 |
#define GEN6_CONDITIONAL_GE 4 |
#define GEN6_CONDITIONAL_L 5 |
#define GEN6_CONDITIONAL_LE 6 |
#define GEN6_CONDITIONAL_C 7 |
#define GEN6_CONDITIONAL_O 8 |
#define GEN6_DEBUG_NONE 0 |
#define GEN6_DEBUG_BREAKPOINT 1 |
#define GEN6_DEPENDENCY_NORMAL 0 |
#define GEN6_DEPENDENCY_NOTCLEARED 1 |
#define GEN6_DEPENDENCY_NOTCHECKED 2 |
#define GEN6_DEPENDENCY_DISABLE 3 |
#define GEN6_EXECUTE_1 0 |
#define GEN6_EXECUTE_2 1 |
#define GEN6_EXECUTE_4 2 |
#define GEN6_EXECUTE_8 3 |
#define GEN6_EXECUTE_16 4 |
#define GEN6_EXECUTE_32 5 |
#define GEN6_HORIZONTAL_STRIDE_0 0 |
#define GEN6_HORIZONTAL_STRIDE_1 1 |
#define GEN6_HORIZONTAL_STRIDE_2 2 |
#define GEN6_HORIZONTAL_STRIDE_4 3 |
#define GEN6_INSTRUCTION_NORMAL 0 |
#define GEN6_INSTRUCTION_SATURATE 1 |
#define GEN6_MASK_ENABLE 0 |
#define GEN6_MASK_DISABLE 1 |
#define GEN6_OPCODE_MOV 1 |
#define GEN6_OPCODE_SEL 2 |
#define GEN6_OPCODE_NOT 4 |
#define GEN6_OPCODE_AND 5 |
#define GEN6_OPCODE_OR 6 |
#define GEN6_OPCODE_XOR 7 |
#define GEN6_OPCODE_SHR 8 |
#define GEN6_OPCODE_SHL 9 |
#define GEN6_OPCODE_RSR 10 |
#define GEN6_OPCODE_RSL 11 |
#define GEN6_OPCODE_ASR 12 |
#define GEN6_OPCODE_CMP 16 |
#define GEN6_OPCODE_JMPI 32 |
#define GEN6_OPCODE_IF 34 |
#define GEN6_OPCODE_IFF 35 |
#define GEN6_OPCODE_ELSE 36 |
#define GEN6_OPCODE_ENDIF 37 |
#define GEN6_OPCODE_DO 38 |
#define GEN6_OPCODE_WHILE 39 |
#define GEN6_OPCODE_BREAK 40 |
#define GEN6_OPCODE_CONTINUE 41 |
#define GEN6_OPCODE_HALT 42 |
#define GEN6_OPCODE_MSAVE 44 |
#define GEN6_OPCODE_MRESTORE 45 |
#define GEN6_OPCODE_PUSH 46 |
#define GEN6_OPCODE_POP 47 |
#define GEN6_OPCODE_WAIT 48 |
#define GEN6_OPCODE_SEND 49 |
#define GEN6_OPCODE_ADD 64 |
#define GEN6_OPCODE_MUL 65 |
#define GEN6_OPCODE_AVG 66 |
#define GEN6_OPCODE_FRC 67 |
#define GEN6_OPCODE_RNDU 68 |
#define GEN6_OPCODE_RNDD 69 |
#define GEN6_OPCODE_RNDE 70 |
#define GEN6_OPCODE_RNDZ 71 |
#define GEN6_OPCODE_MAC 72 |
#define GEN6_OPCODE_MACH 73 |
#define GEN6_OPCODE_LZD 74 |
#define GEN6_OPCODE_SAD2 80 |
#define GEN6_OPCODE_SADA2 81 |
#define GEN6_OPCODE_DP4 84 |
#define GEN6_OPCODE_DPH 85 |
#define GEN6_OPCODE_DP3 86 |
#define GEN6_OPCODE_DP2 87 |
#define GEN6_OPCODE_DPA2 88 |
#define GEN6_OPCODE_LINE 89 |
#define GEN6_OPCODE_NOP 126 |
#define GEN6_PREDICATE_NONE 0 |
#define GEN6_PREDICATE_NORMAL 1 |
#define GEN6_PREDICATE_ALIGN1_ANYV 2 |
#define GEN6_PREDICATE_ALIGN1_ALLV 3 |
#define GEN6_PREDICATE_ALIGN1_ANY2H 4 |
#define GEN6_PREDICATE_ALIGN1_ALL2H 5 |
#define GEN6_PREDICATE_ALIGN1_ANY4H 6 |
#define GEN6_PREDICATE_ALIGN1_ALL4H 7 |
#define GEN6_PREDICATE_ALIGN1_ANY8H 8 |
#define GEN6_PREDICATE_ALIGN1_ALL8H 9 |
#define GEN6_PREDICATE_ALIGN1_ANY16H 10 |
#define GEN6_PREDICATE_ALIGN1_ALL16H 11 |
#define GEN6_PREDICATE_ALIGN16_REPLICATE_X 2 |
#define GEN6_PREDICATE_ALIGN16_REPLICATE_Y 3 |
#define GEN6_PREDICATE_ALIGN16_REPLICATE_Z 4 |
#define GEN6_PREDICATE_ALIGN16_REPLICATE_W 5 |
#define GEN6_PREDICATE_ALIGN16_ANY4H 6 |
#define GEN6_PREDICATE_ALIGN16_ALL4H 7 |
#define GEN6_ARCHITECTURE_REGISTER_FILE 0 |
#define GEN6_GENERAL_REGISTER_FILE 1 |
#define GEN6_MESSAGE_REGISTER_FILE 2 |
#define GEN6_IMMEDIATE_VALUE 3 |
#define GEN6_REGISTER_TYPE_UD 0 |
#define GEN6_REGISTER_TYPE_D 1 |
#define GEN6_REGISTER_TYPE_UW 2 |
#define GEN6_REGISTER_TYPE_W 3 |
#define GEN6_REGISTER_TYPE_UB 4 |
#define GEN6_REGISTER_TYPE_B 5 |
#define GEN6_REGISTER_TYPE_VF 5 /* packed float vector, immediates only? */ |
#define GEN6_REGISTER_TYPE_HF 6 |
#define GEN6_REGISTER_TYPE_V 6 /* packed int vector, immediates only, uword dest only */ |
#define GEN6_REGISTER_TYPE_F 7 |
#define GEN6_ARF_NULL 0x00 |
#define GEN6_ARF_ADDRESS 0x10 |
#define GEN6_ARF_ACCUMULATOR 0x20 |
#define GEN6_ARF_FLAG 0x30 |
#define GEN6_ARF_MASK 0x40 |
#define GEN6_ARF_MASK_STACK 0x50 |
#define GEN6_ARF_MASK_STACK_DEPTH 0x60 |
#define GEN6_ARF_STATE 0x70 |
#define GEN6_ARF_CONTROL 0x80 |
#define GEN6_ARF_NOTIFICATION_COUNT 0x90 |
#define GEN6_ARF_IP 0xA0 |
#define GEN6_AMASK 0 |
#define GEN6_IMASK 1 |
#define GEN6_LMASK 2 |
#define GEN6_CMASK 3 |
#define GEN6_THREAD_NORMAL 0 |
#define GEN6_THREAD_ATOMIC 1 |
#define GEN6_THREAD_SWITCH 2 |
#define GEN6_VERTICAL_STRIDE_0 0 |
#define GEN6_VERTICAL_STRIDE_1 1 |
#define GEN6_VERTICAL_STRIDE_2 2 |
#define GEN6_VERTICAL_STRIDE_4 3 |
#define GEN6_VERTICAL_STRIDE_8 4 |
#define GEN6_VERTICAL_STRIDE_16 5 |
#define GEN6_VERTICAL_STRIDE_32 6 |
#define GEN6_VERTICAL_STRIDE_64 7 |
#define GEN6_VERTICAL_STRIDE_128 8 |
#define GEN6_VERTICAL_STRIDE_256 9 |
#define GEN6_VERTICAL_STRIDE_ONE_DIMENSIONAL 0xF |
#define GEN6_WIDTH_1 0 |
#define GEN6_WIDTH_2 1 |
#define GEN6_WIDTH_4 2 |
#define GEN6_WIDTH_8 3 |
#define GEN6_WIDTH_16 4 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_1K 0 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_2K 1 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_4K 2 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_8K 3 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_16K 4 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_32K 5 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_64K 6 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_128K 7 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_256K 8 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_512K 9 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_1M 10 |
#define GEN6_STATELESS_BUFFER_BOUNDARY_2M 11 |
#define GEN6_POLYGON_FACING_FRONT 0 |
#define GEN6_POLYGON_FACING_BACK 1 |
#define GEN6_MESSAGE_TARGET_NULL 0 |
#define GEN6_MESSAGE_TARGET_MATH 1 |
#define GEN6_MESSAGE_TARGET_SAMPLER 2 |
#define GEN6_MESSAGE_TARGET_GATEWAY 3 |
#define GEN6_MESSAGE_TARGET_DATAPORT_READ 4 |
#define GEN6_MESSAGE_TARGET_DATAPORT_WRITE 5 |
#define GEN6_MESSAGE_TARGET_URB 6 |
#define GEN6_MESSAGE_TARGET_THREAD_SPAWNER 7 |
#define GEN6_SAMPLER_RETURN_FORMAT_FLOAT32 0 |
#define GEN6_SAMPLER_RETURN_FORMAT_UINT32 2 |
#define GEN6_SAMPLER_RETURN_FORMAT_SINT32 3 |
#define GEN6_SAMPLER_MESSAGE_SIMD8_SAMPLE 0 |
#define GEN6_SAMPLER_MESSAGE_SIMD16_SAMPLE 0 |
#define GEN6_SAMPLER_MESSAGE_SIMD16_SAMPLE_BIAS 0 |
#define GEN6_SAMPLER_MESSAGE_SIMD8_KILLPIX 1 |
#define GEN6_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_LOD 1 |
#define GEN6_SAMPLER_MESSAGE_SIMD16_SAMPLE_LOD 1 |
#define GEN6_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_GRADIENTS 2 |
#define GEN6_SAMPLER_MESSAGE_SIMD8_SAMPLE_GRADIENTS 2 |
#define GEN6_SAMPLER_MESSAGE_SIMD4X2_SAMPLE_COMPARE 0 |
#define GEN6_SAMPLER_MESSAGE_SIMD16_SAMPLE_COMPARE 2 |
#define GEN6_SAMPLER_MESSAGE_SIMD4X2_RESINFO 2 |
#define GEN6_SAMPLER_MESSAGE_SIMD8_RESINFO 2 |
#define GEN6_SAMPLER_MESSAGE_SIMD16_RESINFO 2 |
#define GEN6_SAMPLER_MESSAGE_SIMD4X2_LD 3 |
#define GEN6_SAMPLER_MESSAGE_SIMD8_LD 3 |
#define GEN6_SAMPLER_MESSAGE_SIMD16_LD 3 |
#define GEN6_DATAPORT_OWORD_BLOCK_1_OWORDLOW 0 |
#define GEN6_DATAPORT_OWORD_BLOCK_1_OWORDHIGH 1 |
#define GEN6_DATAPORT_OWORD_BLOCK_2_OWORDS 2 |
#define GEN6_DATAPORT_OWORD_BLOCK_4_OWORDS 3 |
#define GEN6_DATAPORT_OWORD_BLOCK_8_OWORDS 4 |
#define GEN6_DATAPORT_OWORD_DUAL_BLOCK_1OWORD 0 |
#define GEN6_DATAPORT_OWORD_DUAL_BLOCK_4OWORDS 2 |
#define GEN6_DATAPORT_DWORD_SCATTERED_BLOCK_8DWORDS 2 |
#define GEN6_DATAPORT_DWORD_SCATTERED_BLOCK_16DWORDS 3 |
#define GEN6_DATAPORT_READ_MESSAGE_OWORD_BLOCK_READ 0 |
#define GEN6_DATAPORT_READ_MESSAGE_OWORD_DUAL_BLOCK_READ 1 |
#define GEN6_DATAPORT_READ_MESSAGE_DWORD_BLOCK_READ 2 |
#define GEN6_DATAPORT_READ_MESSAGE_DWORD_SCATTERED_READ 3 |
#define GEN6_DATAPORT_READ_TARGET_DATA_CACHE 0 |
#define GEN6_DATAPORT_READ_TARGET_RENDER_CACHE 1 |
#define GEN6_DATAPORT_READ_TARGET_SAMPLER_CACHE 2 |
#define GEN6_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE 0 |
#define GEN6_DATAPORT_RENDER_TARGET_WRITE_SIMD16_SINGLE_SOURCE_REPLICATED 1 |
#define GEN6_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN01 2 |
#define GEN6_DATAPORT_RENDER_TARGET_WRITE_SIMD8_DUAL_SOURCE_SUBSPAN23 3 |
#define GEN6_DATAPORT_RENDER_TARGET_WRITE_SIMD8_SINGLE_SOURCE_SUBSPAN01 4 |
#define GEN6_DATAPORT_WRITE_MESSAGE_OWORD_BLOCK_WRITE 0 |
#define GEN6_DATAPORT_WRITE_MESSAGE_OWORD_DUAL_BLOCK_WRITE 1 |
#define GEN6_DATAPORT_WRITE_MESSAGE_DWORD_BLOCK_WRITE 2 |
#define GEN6_DATAPORT_WRITE_MESSAGE_DWORD_SCATTERED_WRITE 3 |
#define GEN6_DATAPORT_WRITE_MESSAGE_RENDER_TARGET_WRITE 4 |
#define GEN6_DATAPORT_WRITE_MESSAGE_STREAMED_VERTEX_BUFFER_WRITE 5 |
#define GEN6_DATAPORT_WRITE_MESSAGE_FLUSH_RENDER_CACHE 7 |
#define GEN6_MATH_FUNCTION_INV 1 |
#define GEN6_MATH_FUNCTION_LOG 2 |
#define GEN6_MATH_FUNCTION_EXP 3 |
#define GEN6_MATH_FUNCTION_SQRT 4 |
#define GEN6_MATH_FUNCTION_RSQ 5 |
#define GEN6_MATH_FUNCTION_SIN 6 /* was 7 */ |
#define GEN6_MATH_FUNCTION_COS 7 /* was 8 */ |
#define GEN6_MATH_FUNCTION_SINCOS 8 /* was 6 */ |
#define GEN6_MATH_FUNCTION_TAN 9 |
#define GEN6_MATH_FUNCTION_POW 10 |
#define GEN6_MATH_FUNCTION_INT_DIV_QUOTIENT_AND_REMAINDER 11 |
#define GEN6_MATH_FUNCTION_INT_DIV_QUOTIENT 12 |
#define GEN6_MATH_FUNCTION_INT_DIV_REMAINDER 13 |
#define GEN6_MATH_INTEGER_UNSIGNED 0 |
#define GEN6_MATH_INTEGER_SIGNED 1 |
#define GEN6_MATH_PRECISION_FULL 0 |
#define GEN6_MATH_PRECISION_PARTIAL 1 |
#define GEN6_MATH_SATURATE_NONE 0 |
#define GEN6_MATH_SATURATE_SATURATE 1 |
#define GEN6_MATH_DATA_VECTOR 0 |
#define GEN6_MATH_DATA_SCALAR 1 |
#define GEN6_URB_OPCODE_WRITE 0 |
#define GEN6_URB_SWIZZLE_NONE 0 |
#define GEN6_URB_SWIZZLE_INTERLEAVE 1 |
#define GEN6_URB_SWIZZLE_TRANSPOSE 2 |
#define GEN6_SCRATCH_SPACE_SIZE_1K 0 |
#define GEN6_SCRATCH_SPACE_SIZE_2K 1 |
#define GEN6_SCRATCH_SPACE_SIZE_4K 2 |
#define GEN6_SCRATCH_SPACE_SIZE_8K 3 |
#define GEN6_SCRATCH_SPACE_SIZE_16K 4 |
#define GEN6_SCRATCH_SPACE_SIZE_32K 5 |
#define GEN6_SCRATCH_SPACE_SIZE_64K 6 |
#define GEN6_SCRATCH_SPACE_SIZE_128K 7 |
#define GEN6_SCRATCH_SPACE_SIZE_256K 8 |
#define GEN6_SCRATCH_SPACE_SIZE_512K 9 |
#define GEN6_SCRATCH_SPACE_SIZE_1M 10 |
#define GEN6_SCRATCH_SPACE_SIZE_2M 11 |
/* The hardware supports two different modes for border color. The |
* default (OpenGL) mode uses floating-point color channels, while the |
* legacy mode uses 4 bytes. |
* |
* More significantly, the legacy mode respects the components of the |
* border color for channels not present in the source, (whereas the |
* default mode will ignore the border color's alpha channel and use |
* alpha==1 for an RGB source, for example). |
* |
* The legacy mode matches the semantics specified by the Render |
* extension. |
*/ |
struct gen6_sampler_default_border_color { |
float color[4]; |
}; |
struct gen6_sampler_legacy_border_color { |
uint8_t color[4]; |
}; |
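/* |
 * Illustrative sketch (hypothetical helper, not in the original source): |
 * the entry referenced by ss2.border_color below is laid out according to |
 * ss0.border_color_mode, so writing it might look like: |
 */ |
static inline void |
gen6_set_border_color(void *entry, int legacy_mode, const float rgba[4]) |
{ |
	int i; |
	if (legacy_mode) { |
		/* GEN6_BORDER_COLOR_MODE_LEGACY: one byte per channel */ |
		struct gen6_sampler_legacy_border_color *bc = entry; |
		for (i = 0; i < 4; i++) |
			bc->color[i] = (uint8_t)(rgba[i] * 255.0f + 0.5f); |
	} else { |
		/* GEN6_BORDER_COLOR_MODE_DEFAULT: one float per channel */ |
		struct gen6_sampler_default_border_color *bc = entry; |
		for (i = 0; i < 4; i++) |
			bc->color[i] = rgba[i]; |
	} |
} |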
struct gen6_sampler_state { |
struct { |
uint32_t shadow_function:3; |
uint32_t lod_bias:11; |
uint32_t min_filter:3; |
uint32_t mag_filter:3; |
uint32_t mip_filter:2; |
uint32_t base_level:5; |
uint32_t pad:1; |
uint32_t lod_preclamp:1; |
uint32_t border_color_mode:1; |
uint32_t pad0:1; |
uint32_t disable:1; |
} ss0; |
struct { |
uint32_t r_wrap_mode:3; |
uint32_t t_wrap_mode:3; |
uint32_t s_wrap_mode:3; |
uint32_t pad:3; |
uint32_t max_lod:10; |
uint32_t min_lod:10; |
} ss1; |
struct { |
uint32_t border_color; |
} ss2; |
struct { |
uint32_t pad:19; |
uint32_t max_aniso:3; |
uint32_t chroma_key_mode:1; |
uint32_t chroma_key_index:2; |
uint32_t chroma_key_enable:1; |
uint32_t monochrome_filter_width:3; |
uint32_t monochrome_filter_height:3; |
} ss3; |
}; |
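/* |
 * Illustrative sketch (not from the original source): a bilinear, |
 * edge-clamped sampler programmed with the MAPFILTER/TEXCOORDMODE |
 * defines above; all other fields are assumed zeroed by the caller. |
 */ |
static inline void |
gen6_sampler_bilinear_clamp(struct gen6_sampler_state *ss) |
{ |
	ss->ss0.min_filter = GEN6_MAPFILTER_LINEAR; |
	ss->ss0.mag_filter = GEN6_MAPFILTER_LINEAR; |
	ss->ss0.mip_filter = GEN6_MIPFILTER_NONE; |
	ss->ss1.r_wrap_mode = GEN6_TEXCOORDMODE_CLAMP; |
	ss->ss1.s_wrap_mode = GEN6_TEXCOORDMODE_CLAMP; |
	ss->ss1.t_wrap_mode = GEN6_TEXCOORDMODE_CLAMP; |
} |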
struct gen6_blend_state { |
struct { |
uint32_t dest_blend_factor:5; |
uint32_t source_blend_factor:5; |
uint32_t pad3:1; |
uint32_t blend_func:3; |
uint32_t pad2:1; |
uint32_t ia_dest_blend_factor:5; |
uint32_t ia_source_blend_factor:5; |
uint32_t pad1:1; |
uint32_t ia_blend_func:3; |
uint32_t pad0:1; |
uint32_t ia_blend_enable:1; |
uint32_t blend_enable:1; |
} blend0; |
struct { |
uint32_t post_blend_clamp_enable:1; |
uint32_t pre_blend_clamp_enable:1; |
uint32_t clamp_range:2; |
uint32_t pad0:4; |
uint32_t x_dither_offset:2; |
uint32_t y_dither_offset:2; |
uint32_t dither_enable:1; |
uint32_t alpha_test_func:3; |
uint32_t alpha_test_enable:1; |
uint32_t pad1:1; |
uint32_t logic_op_func:4; |
uint32_t logic_op_enable:1; |
uint32_t pad2:1; |
uint32_t write_disable_b:1; |
uint32_t write_disable_g:1; |
uint32_t write_disable_r:1; |
uint32_t write_disable_a:1; |
uint32_t pad3:1; |
uint32_t alpha_to_coverage_dither:1; |
uint32_t alpha_to_one:1; |
uint32_t alpha_to_coverage:1; |
} blend1; |
}; |
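/* |
 * Illustrative sketch (not from the original source): blend0 programmed |
 * for premultiplied-alpha OVER, dst = src + dst * (1 - src.alpha); the |
 * ia_* fields drive the independent-alpha path and simply mirror RGB. |
 */ |
static inline void |
gen6_blend_over(struct gen6_blend_state *blend) |
{ |
	blend->blend0.blend_enable = 1; |
	blend->blend0.blend_func = GEN6_BLENDFUNCTION_ADD; |
	blend->blend0.source_blend_factor = GEN6_BLENDFACTOR_ONE; |
	blend->blend0.dest_blend_factor = GEN6_BLENDFACTOR_INV_SRC_ALPHA; |
	blend->blend0.ia_blend_enable = 1; |
	blend->blend0.ia_blend_func = GEN6_BLENDFUNCTION_ADD; |
	blend->blend0.ia_source_blend_factor = GEN6_BLENDFACTOR_ONE; |
	blend->blend0.ia_dest_blend_factor = GEN6_BLENDFACTOR_INV_SRC_ALPHA; |
} |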
struct gen6_color_calc_state { |
struct { |
uint32_t alpha_test_format:1; |
uint32_t pad0:14; |
uint32_t round_disable:1; |
uint32_t bf_stencil_ref:8; |
uint32_t stencil_ref:8; |
} cc0; |
union { |
float alpha_ref_f; |
struct { |
uint32_t ui:8; |
uint32_t pad0:24; |
} alpha_ref_fi; |
} cc1; |
float constant_r; |
float constant_g; |
float constant_b; |
float constant_a; |
}; |
struct gen6_depth_stencil_state { |
struct { |
uint32_t pad0:3; |
uint32_t bf_stencil_pass_depth_pass_op:3; |
uint32_t bf_stencil_pass_depth_fail_op:3; |
uint32_t bf_stencil_fail_op:3; |
uint32_t bf_stencil_func:3; |
uint32_t bf_stencil_enable:1; |
uint32_t pad1:2; |
uint32_t stencil_write_enable:1; |
uint32_t stencil_pass_depth_pass_op:3; |
uint32_t stencil_pass_depth_fail_op:3; |
uint32_t stencil_fail_op:3; |
uint32_t stencil_func:3; |
uint32_t stencil_enable:1; |
} ds0; |
struct { |
uint32_t bf_stencil_write_mask:8; |
uint32_t bf_stencil_test_mask:8; |
uint32_t stencil_write_mask:8; |
uint32_t stencil_test_mask:8; |
} ds1; |
struct { |
uint32_t pad0:26; |
uint32_t depth_write_enable:1; |
uint32_t depth_test_func:3; |
uint32_t pad1:1; |
uint32_t depth_test_enable:1; |
} ds2; |
}; |
struct gen6_surface_state { |
struct { |
uint32_t cube_pos_z:1; |
uint32_t cube_neg_z:1; |
uint32_t cube_pos_y:1; |
uint32_t cube_neg_y:1; |
uint32_t cube_pos_x:1; |
uint32_t cube_neg_x:1; |
uint32_t pad:3; |
uint32_t render_cache_read_mode:1; |
uint32_t mipmap_layout_mode:1; |
uint32_t vert_line_stride_ofs:1; |
uint32_t vert_line_stride:1; |
uint32_t color_blend:1; |
uint32_t writedisable_blue:1; |
uint32_t writedisable_green:1; |
uint32_t writedisable_red:1; |
uint32_t writedisable_alpha:1; |
uint32_t surface_format:9; |
uint32_t data_return_format:1; |
uint32_t pad0:1; |
uint32_t surface_type:3; |
} ss0; |
struct { |
uint32_t base_addr; |
} ss1; |
struct { |
uint32_t render_target_rotation:2; |
uint32_t mip_count:4; |
uint32_t width:13; |
uint32_t height:13; |
} ss2; |
struct { |
uint32_t tile_walk:1; |
uint32_t tiled_surface:1; |
uint32_t pad:1; |
uint32_t pitch:18; |
uint32_t depth:11; |
} ss3; |
struct { |
uint32_t pad:19; |
uint32_t min_array_elt:9; |
uint32_t min_lod:4; |
} ss4; |
struct { |
uint32_t pad:20; |
uint32_t y_offset:4; |
uint32_t pad2:1; |
uint32_t x_offset:7; |
} ss5; |
}; |
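/* |
 * Illustrative sketch (assumptions: a linear, untiled B8G8R8A8 render |
 * target; not part of the original header): note that width, height and |
 * pitch are programmed minus one. |
 */ |
static inline void |
gen6_surface_2d(struct gen6_surface_state *ss, |
		uint32_t gtt_offset, int width, int height, int pitch) |
{ |
	ss->ss0.surface_type = GEN6_SURFACE_2D; |
	ss->ss0.surface_format = GEN6_SURFACEFORMAT_B8G8R8A8_UNORM; |
	ss->ss1.base_addr = gtt_offset; |
	ss->ss2.width = width - 1; |
	ss->ss2.height = height - 1; |
	ss->ss3.pitch = pitch - 1; |
	ss->ss3.tiled_surface = 0;	/* linear */ |
} |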
/* Surface state DW0 */ |
#define GEN6_SURFACE_RC_READ_WRITE (1 << 8) |
#define GEN6_SURFACE_MIPLAYOUT_SHIFT 10 |
#define GEN6_SURFACE_MIPMAPLAYOUT_BELOW 0 |
#define GEN6_SURFACE_MIPMAPLAYOUT_RIGHT 1 |
#define GEN6_SURFACE_CUBEFACE_ENABLES 0x3f |
#define GEN6_SURFACE_BLEND_ENABLED (1 << 13) |
#define GEN6_SURFACE_WRITEDISABLE_B_SHIFT 14 |
#define GEN6_SURFACE_WRITEDISABLE_G_SHIFT 15 |
#define GEN6_SURFACE_WRITEDISABLE_R_SHIFT 16 |
#define GEN6_SURFACE_WRITEDISABLE_A_SHIFT 17 |
#define GEN6_SURFACE_FORMAT_SHIFT 18 |
#define GEN6_SURFACE_FORMAT_MASK GEN6_MASK(26, 18) |
#define GEN6_SURFACE_TYPE_SHIFT 29 |
#define GEN6_SURFACE_TYPE_MASK GEN6_MASK(31, 29) |
#define GEN6_SURFACE_1D 0 |
#define GEN6_SURFACE_2D 1 |
#define GEN6_SURFACE_3D 2 |
#define GEN6_SURFACE_CUBE 3 |
#define GEN6_SURFACE_BUFFER 4 |
#define GEN6_SURFACE_NULL 7 |
/* Surface state DW2 */ |
#define GEN6_SURFACE_HEIGHT_SHIFT 19 |
#define GEN6_SURFACE_HEIGHT_MASK GEN6_MASK(31, 19) |
#define GEN6_SURFACE_WIDTH_SHIFT 6 |
#define GEN6_SURFACE_WIDTH_MASK GEN6_MASK(18, 6) |
#define GEN6_SURFACE_LOD_SHIFT 2 |
#define GEN6_SURFACE_LOD_MASK GEN6_MASK(5, 2) |
/* Surface state DW3 */ |
#define GEN6_SURFACE_DEPTH_SHIFT 21 |
#define GEN6_SURFACE_DEPTH_MASK GEN6_MASK(31, 21) |
#define GEN6_SURFACE_PITCH_SHIFT 3 |
#define GEN6_SURFACE_PITCH_MASK GEN6_MASK(19, 3) |
#define GEN6_SURFACE_TILED (1 << 1) |
#define GEN6_SURFACE_TILED_Y (1 << 0) |
/* Surface state DW4 */ |
#define GEN6_SURFACE_MIN_LOD_SHIFT 28 |
#define GEN6_SURFACE_MIN_LOD_MASK GEN6_MASK(31, 28) |
/* Surface state DW5 */ |
#define GEN6_SURFACE_X_OFFSET_SHIFT 25 |
#define GEN6_SURFACE_X_OFFSET_MASK GEN6_MASK(31, 25) |
#define GEN6_SURFACE_Y_OFFSET_SHIFT 20 |
#define GEN6_SURFACE_Y_OFFSET_MASK GEN6_MASK(23, 20) |
struct gen6_cc_viewport { |
float min_depth; |
float max_depth; |
}; |
typedef enum { |
SAMPLER_FILTER_NEAREST = 0, |
SAMPLER_FILTER_BILINEAR, |
FILTER_COUNT |
} sampler_filter_t; |
typedef enum { |
SAMPLER_EXTEND_NONE = 0, |
SAMPLER_EXTEND_REPEAT, |
SAMPLER_EXTEND_PAD, |
SAMPLER_EXTEND_REFLECT, |
EXTEND_COUNT |
} sampler_extend_t; |
#endif |
/drivers/video/drm/i915/sna/sna.h |
---|
0,0 → 1,125 |
#define FALSE 0 |
#define TRUE 1 |
#define DBG(x) |
//#define DBG(x) dbgprintf x |
#define assert(x) |
#include "compiler.h" |
#include <linux/kernel.h> |
struct pixman_box16 |
{ |
int16_t x1, y1, x2, y2; |
}; |
typedef struct pixman_box16 BoxRec; |
typedef unsigned int CARD32; |
#include "sna_render.h" |
#include "kgem.h" |
#define PictOpClear 0 |
#define PictOpSrc 1 |
#define PictOpDst 2 |
#define PictOpOver 3 |
#define PictOpOverReverse 4 |
#define PictOpIn 5 |
#define PictOpInReverse 6 |
#define PictOpOut 7 |
#define PictOpOutReverse 8 |
#define PictOpAtop 9 |
#define PictOpAtopReverse 10 |
#define PictOpXor 11 |
#define PictOpAdd 12 |
#define PictOpSaturate 13 |
#define PictOpMaximum 13 |
struct sna { |
unsigned flags; |
#define SNA_NO_THROTTLE 0x1 |
#define SNA_NO_DELAYED_FLUSH 0x2 |
// int timer[NUM_TIMERS]; |
// uint16_t timer_active; |
// uint16_t timer_ready; |
// int vblank_interval; |
// struct list deferred_free; |
// struct list dirty_pixmaps; |
// struct list active_pixmaps; |
// struct list inactive_clock[2]; |
unsigned int tiling; |
#define SNA_TILING_DISABLE 0x0 |
#define SNA_TILING_FB 0x1 |
#define SNA_TILING_2D 0x2 |
#define SNA_TILING_3D 0x4 |
#define SNA_TILING_ALL (~0) |
int Chipset; |
// EntityInfoPtr pEnt; |
// struct pci_device *PciInfo; |
// struct intel_chipset chipset; |
// PicturePtr clear; |
struct { |
uint32_t fill_bo; |
uint32_t fill_pixel; |
uint32_t fill_alu; |
} blt_state; |
union { |
// struct gen2_render_state gen2; |
// struct gen3_render_state gen3; |
// struct gen4_render_state gen4; |
// struct gen5_render_state gen5; |
struct gen6_render_state gen6; |
// struct gen7_render_state gen7; |
} render_state; |
uint32_t have_render; |
uint32_t default_tiling; |
// Bool directRenderingOpen; |
// char *deviceName; |
/* Broken-out options. */ |
// OptionInfoPtr Options; |
/* Driver phase/state information */ |
// Bool suspended; |
struct kgem kgem; |
struct sna_render render; |
}; |
static inline int vertex_space(struct sna *sna) |
{ |
return sna->render.vertex_size - sna->render.vertex_used; |
} |
static inline void vertex_emit(struct sna *sna, float v) |
{ |
assert(sna->render.vertex_used < sna->render.vertex_size); |
sna->render.vertices[sna->render.vertex_used++] = v; |
} |
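/* pack two signed 16-bit coordinates into a single 32-bit vertex slot */ |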
static inline void vertex_emit_2s(struct sna *sna, int16_t x, int16_t y) |
{ |
int16_t *v = (int16_t *)&sna->render.vertices[sna->render.vertex_used++]; |
assert(sna->render.vertex_used <= sna->render.vertex_size); |
v[0] = x; |
v[1] = y; |
} |
static inline void batch_emit(struct sna *sna, uint32_t dword) |
{ |
assert(sna->kgem.mode != KGEM_NONE); |
assert(sna->kgem.nbatch + KGEM_BATCH_RESERVED < sna->kgem.surface); |
sna->kgem.batch[sna->kgem.nbatch++] = dword; |
} |
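/* |
 * Illustrative usage sketch (assumes the gen6 command defines from |
 * gen6_render.h are visible here; not part of the original file): |
 * PIPELINE_SELECT is a single-dword command, so one batch_emit() call |
 * suffices: |
 * |
 *   batch_emit(sna, GEN6_PIPELINE_SELECT | PIPELINE_SELECT_3D); |
 */ |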
/drivers/video/drm/i915/sna/sna_reg.h |
---|
0,0 → 1,81 |
#ifndef SNA_REG_H |
#define SNA_REG_H |
/* Flush */ |
#define MI_FLUSH (0x04<<23) |
#define MI_FLUSH_DW (0x26<<23) |
#define MI_WRITE_DIRTY_STATE (1<<4) |
#define MI_END_SCENE (1<<3) |
#define MI_GLOBAL_SNAPSHOT_COUNT_RESET (1<<3) |
#define MI_INHIBIT_RENDER_CACHE_FLUSH (1<<2) |
#define MI_STATE_INSTRUCTION_CACHE_FLUSH (1<<1) |
#define MI_INVALIDATE_MAP_CACHE (1<<0) |
/* broadwater flush bits */ |
#define BRW_MI_GLOBAL_SNAPSHOT_RESET (1 << 3) |
#define MI_BATCH_BUFFER_END (0xA << 23) |
/* Noop */ |
#define MI_NOOP 0x00 |
#define MI_NOOP_WRITE_ID (1<<22) |
#define MI_NOOP_ID_MASK ((1<<22) - 1) |
/* Wait for Events */ |
#define MI_WAIT_FOR_EVENT (0x03<<23) |
#define MI_WAIT_FOR_PIPEB_SVBLANK (1<<18) |
#define MI_WAIT_FOR_PIPEA_SVBLANK (1<<17) |
#define MI_WAIT_FOR_OVERLAY_FLIP (1<<16) |
#define MI_WAIT_FOR_PIPEB_VBLANK (1<<7) |
#define MI_WAIT_FOR_PIPEB_SCAN_LINE_WINDOW (1<<5) |
#define MI_WAIT_FOR_PIPEA_VBLANK (1<<3) |
#define MI_WAIT_FOR_PIPEA_SCAN_LINE_WINDOW (1<<1) |
/* Set the scan line for MI_WAIT_FOR_PIPE?_SCAN_LINE_WINDOW */ |
#define MI_LOAD_SCAN_LINES_INCL (0x12<<23) |
#define MI_LOAD_SCAN_LINES_DISPLAY_PIPEA (0) |
#define MI_LOAD_SCAN_LINES_DISPLAY_PIPEB (0x1<<20) |
/* BLT commands */ |
#define BLT_WRITE_ALPHA (1<<21) |
#define BLT_WRITE_RGB (1<<20) |
#define BLT_SRC_TILED (1<<15) |
#define BLT_DST_TILED (1<<11) |
#define COLOR_BLT_CMD ((2<<29)|(0x40<<22)|(0x3)) |
#define XY_COLOR_BLT ((2<<29)|(0x50<<22)|(0x4)) |
#define XY_SETUP_BLT ((2<<29)|(1<<22)|6) |
#define XY_SETUP_MONO_PATTERN_SL_BLT ((2<<29)|(0x11<<22)|7) |
#define XY_SETUP_CLIP ((2<<29)|(3<<22)|1) |
#define XY_SCANLINE_BLT ((2<<29)|(0x25<<22)|1) |
#define XY_TEXT_IMMEDIATE_BLT ((2<<29)|(0x31<<22)|(1<<16)) |
#define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) |
#define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|0x4) |
#define XY_PAT_BLT_IMMEDIATE ((2<<29)|(0x72<<22)) |
#define XY_MONO_PAT ((0x2<<29)|(0x52<<22)|0x7) |
#define XY_MONO_SRC_COPY ((0x2<<29)|(0x54<<22)|(0x6)) |
#define XY_MONO_SRC_COPY_IMM ((0x2<<29)|(0x71<<22)) |
#define XY_FULL_MONO_PATTERN_BLT ((0x2<<29)|(0x57<<22)|0xa) |
#define XY_FULL_MONO_PATTERN_MONO_SRC_BLT ((0x2<<29)|(0x58<<22)|0xa) |
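/* |
 * Illustrative note (not in the original header): 2D/BLT commands encode |
 * client 0x2 in bits 31:29, the opcode in bits 28:22 and the dword count |
 * minus two in the low bits -- e.g. XY_COLOR_BLT is opcode 0x50 with six |
 * dwords in total, hence the trailing |0x4. |
 */ |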
/* FLUSH commands */ |
#define BRW_3D(Pipeline,Opcode,Subopcode) \ |
((3 << 29) | \ |
((Pipeline) << 27) | \ |
((Opcode) << 24) | \ |
((Subopcode) << 16)) |
#define PIPE_CONTROL BRW_3D(3, 2, 0) |
#define PIPE_CONTROL_NOWRITE (0 << 14) |
#define PIPE_CONTROL_WRITE_QWORD (1 << 14) |
#define PIPE_CONTROL_WRITE_DEPTH (2 << 14) |
#define PIPE_CONTROL_WRITE_TIME (3 << 14) |
#define PIPE_CONTROL_DEPTH_STALL (1 << 13) |
#define PIPE_CONTROL_WC_FLUSH (1 << 12) |
#define PIPE_CONTROL_IS_FLUSH (1 << 11) |
#define PIPE_CONTROL_TC_FLUSH (1 << 10) |
#define PIPE_CONTROL_NOTIFY_ENABLE (1 << 8) |
#define PIPE_CONTROL_GLOBAL_GTT (1 << 2) |
#define PIPE_CONTROL_LOCAL_PGTT (0 << 2) |
#define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1 << 0) |
#endif |
/drivers/video/drm/i915/sna/sna_stream.c |
---|
0,0 → 1,108 |
/* |
* Copyright © 2011 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
* SOFTWARE. |
* |
* Authors: |
* Chris Wilson <chris@chris-wilson.co.uk> |
* |
*/ |
#include <linux/kernel.h> |
#include "../bitmap.h" |
#include "sna.h" |
#include "sna_render.h" |
#include <memory.h> |
#if DEBUG_STREAM |
#undef DBG |
#define DBG(x) ErrorF x |
#endif |
int sna_static_stream_init(struct sna_static_stream *stream) |
{ |
stream->used = 0; |
stream->size = 64*1024; |
stream->data = malloc(stream->size); |
return stream->data != NULL; |
} |
static uint32_t sna_static_stream_alloc(struct sna_static_stream *stream, |
uint32_t len, uint32_t align) |
{ |
uint32_t offset = ALIGN(stream->used, align); |
uint32_t size = offset + len; |
if (size > stream->size) { |
/* |
do |
stream->size *= 2; |
while (stream->size < size); |
stream->data = realloc(stream->data, stream->size); |
*/ |
dbgprintf("%s: EPIC FAIL\n", __FUNCTION__); |
return 0; |
} |
stream->used = size; |
return offset; |
} |
uint32_t sna_static_stream_add(struct sna_static_stream *stream, |
const void *data, uint32_t len, uint32_t align) |
{ |
uint32_t offset = sna_static_stream_alloc(stream, len, align); |
memcpy(stream->data + offset, data, len); |
return offset; |
} |
void *sna_static_stream_map(struct sna_static_stream *stream, |
uint32_t len, uint32_t align) |
{ |
uint32_t offset = sna_static_stream_alloc(stream, len, align); |
return memset(stream->data + offset, 0, len); |
} |
uint32_t sna_static_stream_offsetof(struct sna_static_stream *stream, void *ptr) |
{ |
return (uint8_t *)ptr - stream->data; |
} |
struct kgem_bo *sna_static_stream_fini(struct sna *sna, |
struct sna_static_stream *stream) |
{ |
struct kgem_bo *bo; |
DBG(("uploaded %d bytes of static state\n", stream->used)); |
bo = kgem_create_linear(&sna->kgem, stream->used); |
if (bo && !kgem_bo_write(&sna->kgem, bo, stream->data, stream->used)) { |
// kgem_bo_destroy(&sna->kgem, bo); |
return NULL; |
} |
free(stream->data); |
LEAVE(); |
return bo; |
} |
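#if 0 |
/* |
 * Illustrative usage sketch (hypothetical caller, not part of the |
 * original file): pack a kernel into the stream, remember its offset, |
 * then turn the whole stream into a single linear bo. |
 */ |
static struct kgem_bo *upload_static_state(struct sna *sna, |
					   const void *kernel, int size) |
{ |
	struct sna_static_stream stream; |
	uint32_t kernel_offset; |
	if (!sna_static_stream_init(&stream)) |
		return NULL; |
	/* 64-byte alignment is typical for EU kernels */ |
	kernel_offset = sna_static_stream_add(&stream, kernel, size, 64); |
	(void)kernel_offset;	/* real callers store this in render state */ |
	return sna_static_stream_fini(sna, &stream); |
} |
#endif |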
/drivers/video/drm/i915/sna |
---|
Property changes: |
Added: bugtraq:number |
+true |
\ No newline at end of property |
/drivers/video/drm/i915/render/exa_wm_mask_affine.g6b |
---|
0,0 → 1,4 |
{ 0x0060005a, 0x210077be, 0x00000100, 0x008d0040 }, |
{ 0x0060005a, 0x212077be, 0x00000100, 0x008d0080 }, |
{ 0x0060005a, 0x214077be, 0x00000110, 0x008d0040 }, |
{ 0x0060005a, 0x216077be, 0x00000110, 0x008d0080 }, |
/drivers/video/drm/i915/render/exa_wm_mask_sample_a.g6b |
---|
0,0 → 1,3 |
{ 0x00000201, 0x20080061, 0x00000000, 0x00007000 }, |
{ 0x00600001, 0x20e00022, 0x008d0000, 0x00000000 }, |
{ 0x02800031, 0x23801cc9, 0x000000e0, 0x0a2a0102 }, |
/drivers/video/drm/i915/render/exa_wm_noca.g6b |
---|
0,0 → 1,4 |
{ 0x00800041, 0x21c077bd, 0x008d01c0, 0x008d0380 }, |
{ 0x00800041, 0x220077bd, 0x008d0200, 0x008d0380 }, |
{ 0x00800041, 0x224077bd, 0x008d0240, 0x008d0380 }, |
{ 0x00800041, 0x228077bd, 0x008d0280, 0x008d0380 }, |
/drivers/video/drm/i915/render/exa_wm_src_affine.g6b |
---|
0,0 → 1,4 |
{ 0x0060005a, 0x204077be, 0x000000c0, 0x008d0040 }, |
{ 0x0060005a, 0x206077be, 0x000000c0, 0x008d0080 }, |
{ 0x0060005a, 0x208077be, 0x000000d0, 0x008d0040 }, |
{ 0x0060005a, 0x20a077be, 0x000000d0, 0x008d0080 }, |
/drivers/video/drm/i915/render/exa_wm_src_projective.g6b |
---|
0,0 → 1,12 |
{ 0x0060005a, 0x23c077bd, 0x000000e0, 0x008d0040 }, |
{ 0x0060005a, 0x23e077bd, 0x000000e0, 0x008d0080 }, |
{ 0x01600038, 0x218003bd, 0x008d03c0, 0x00000000 }, |
{ 0x01600038, 0x21a003bd, 0x008d03e0, 0x00000000 }, |
{ 0x0060005a, 0x23c077bd, 0x000000c0, 0x008d0040 }, |
{ 0x0060005a, 0x23e077bd, 0x000000c0, 0x008d0080 }, |
{ 0x00600041, 0x204077be, 0x008d03c0, 0x008d0180 }, |
{ 0x00600041, 0x206077be, 0x008d03e0, 0x008d01a0 }, |
{ 0x0060005a, 0x23c077bd, 0x000000d0, 0x008d0040 }, |
{ 0x0060005a, 0x23e077bd, 0x000000d0, 0x008d0080 }, |
{ 0x00600041, 0x208077be, 0x008d03c0, 0x008d0180 }, |
{ 0x00600041, 0x20a077be, 0x008d03e0, 0x008d01a0 }, |
/drivers/video/drm/i915/render/exa_wm_src_sample_argb.g6b |
---|
0,0 → 1,3 |
{ 0x00000201, 0x20080061, 0x00000000, 0x00000000 }, |
{ 0x00600001, 0x20200022, 0x008d0000, 0x00000000 }, |
{ 0x02800031, 0x21c01cc9, 0x00000020, 0x0a8a0001 }, |
/drivers/video/drm/i915/render/exa_wm_write.g6b |
---|
0,0 → 1,17 |
{ 0x00600001, 0x204003be, 0x008d01c0, 0x00000000 }, |
{ 0x00600001, 0x206003be, 0x008d01e0, 0x00000000 }, |
{ 0x00600001, 0x208003be, 0x008d0200, 0x00000000 }, |
{ 0x00600001, 0x20a003be, 0x008d0220, 0x00000000 }, |
{ 0x00600001, 0x20c003be, 0x008d0240, 0x00000000 }, |
{ 0x00600001, 0x20e003be, 0x008d0260, 0x00000000 }, |
{ 0x00600001, 0x210003be, 0x008d0280, 0x00000000 }, |
{ 0x00600001, 0x212003be, 0x008d02a0, 0x00000000 }, |
{ 0x05800031, 0x24001cc8, 0x00000040, 0x90019000 }, |
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 }, |
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 }, |
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 }, |
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 }, |
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 }, |
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 }, |
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 }, |
{ 0x0000007e, 0x00000000, 0x00000000, 0x00000000 }, |
/drivers/video/drm/i915/render |
---|
Property changes: |
Added: bugtraq:number |
+true |
\ No newline at end of property |
/drivers/video/drm/drm_crtc_helper.c |
---|
69,7 → 69,6 |
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); |
static bool drm_kms_helper_poll = true; |
module_param_named(poll, drm_kms_helper_poll, bool, 0600); |
static void drm_mode_validate_flag(struct drm_connector *connector, |
int flags) |
142,12 → 141,6 |
// dbgprintf("status %x\n", connector->status); |
} |
/* Re-enable polling in case the global poll config changed. */ |
if (drm_kms_helper_poll != dev->mode_config.poll_running) |
drm_kms_helper_poll_enable(dev); |
dev->mode_config.poll_running = drm_kms_helper_poll; |
if (connector->status == connector_status_disconnected) { |
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", |
connector->base.id, drm_get_connector_name(connector)); |
962,13 → 955,7 |
} |
EXPORT_SYMBOL(drm_helper_resume_force_mode); |
void drm_kms_helper_hotplug_event(struct drm_device *dev) |
{ |
/* send a uevent + call fbdev */ |
if (dev->mode_config.funcs->output_poll_changed) |
dev->mode_config.funcs->output_poll_changed(dev); |
} |
EXPORT_SYMBOL(drm_kms_helper_hotplug_event); |
#if 0 |
#define DRM_OUTPUT_POLL_PERIOD (10*HZ) |
static void output_poll_execute(struct work_struct *work) |
1017,8 → 1004,8 |
if (changed) |
drm_kms_helper_hotplug_event(dev); |
// if (repoll) |
// schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); |
if (repoll) |
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD); |
} |
void drm_kms_helper_poll_disable(struct drm_device *dev) |
1025,7 → 1012,7 |
{ |
if (!dev->mode_config.poll_enabled) |
return; |
// cancel_delayed_work_sync(&dev->mode_config.output_poll_work); |
cancel_delayed_work_sync(&dev->mode_config.output_poll_work); |
} |
EXPORT_SYMBOL(drm_kms_helper_poll_disable); |
1043,8 → 1030,8 |
poll = true; |
} |
// if (poll) |
// schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); |
if (poll) |
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD); |
} |
EXPORT_SYMBOL(drm_kms_helper_poll_enable); |
1096,3 → 1083,5 |
drm_kms_helper_hotplug_event(dev); |
} |
EXPORT_SYMBOL(drm_helper_hpd_irq_event); |
#endif |
/drivers/video/drm/drm_crtc.c |
---|
37,54 → 37,6 |
#include <drm/drm_edid.h> |
#include <drm/drm_fourcc.h> |
/** |
* drm_modeset_lock_all - take all modeset locks |
* @dev: drm device |
* |
* This function takes all modeset locks, suitable where a more fine-grained |
* scheme isn't (yet) implemented. |
*/ |
void drm_modeset_lock_all(struct drm_device *dev) |
{ |
struct drm_crtc *crtc; |
mutex_lock(&dev->mode_config.mutex); |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_modeset_lock_all); |
/** |
* drm_modeset_unlock_all - drop all modeset locks |
* @dev: device |
*/ |
void drm_modeset_unlock_all(struct drm_device *dev) |
{ |
struct drm_crtc *crtc; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
mutex_unlock(&crtc->mutex); |
mutex_unlock(&dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_modeset_unlock_all); |
/** |
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked |
* @dev: device |
*/ |
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) |
{ |
struct drm_crtc *crtc; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
WARN_ON(!mutex_is_locked(&crtc->mutex)); |
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); |
} |
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked); |
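These three helpers form the coarse locking bracket used throughout this file: take the mode_config mutex, then every per-crtc mutex nested under it, and assert the whole set where needed. A typical caller simply wraps its critical section:

drm_modeset_lock_all(dev);
/* ... walk or mutate dev->mode_config state ... */
drm_modeset_unlock_all(dev);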
/* Avoid boilerplate. I'm tired of typing. */ |
#define DRM_ENUM_NAME_FN(fnname, list) \ |
char *fnname(int val) \ |
251,11 → 203,13 |
} |
/** |
* drm_mode_object_get - allocate a new modeset identifier |
* drm_mode_object_get - allocate a new identifier |
* @dev: DRM device |
* @obj: object pointer, used to generate unique ID |
* @obj_type: object type |
* @ptr: object pointer, used to generate unique ID |
* @type: object type |
* |
* LOCKING: |
* |
* Create a unique identifier based on @ptr in @dev's identifier space. Used |
* for tracking modes, CRTCs and connectors. |
* |
266,28 → 220,36 |
static int drm_mode_object_get(struct drm_device *dev, |
struct drm_mode_object *obj, uint32_t obj_type) |
{ |
int new_id = 0; |
int ret; |
again: |
if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) { |
DRM_ERROR("Ran out of memory getting a mode number\n");
return -ENOMEM; |
} |
mutex_lock(&dev->mode_config.idr_mutex); |
ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL); |
if (ret >= 0) { |
/* |
* Set up the object linking under the protection of the idr |
* lock so that other users can't see inconsistent state. |
*/ |
obj->id = ret; |
obj->type = obj_type; |
} |
ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); |
mutex_unlock(&dev->mode_config.idr_mutex); |
if (ret == -EAGAIN) |
goto again; |
else if (ret) |
return ret; |
return ret < 0 ? ret : 0; |
obj->id = new_id; |
obj->type = obj_type; |
return 0; |
} |
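The two variants above straddle the idr API change from Linux 3.9: idr_alloc() replaced the idr_pre_get()/idr_get_new_above() pair, whose -EAGAIN retry loop was needed because a concurrent caller could consume the preallocated layer. A condensed sketch of the older idiom (lock and idr names illustrative):

again:
	if (idr_pre_get(&some_idr, GFP_KERNEL) == 0)
		return -ENOMEM;		/* preallocation failed */
	mutex_lock(&some_lock);
	ret = idr_get_new_above(&some_idr, ptr, 1, &new_id);
	mutex_unlock(&some_lock);
	if (ret == -EAGAIN)
		goto again;		/* lost the race, preallocate again */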
/** |
* drm_mode_object_put - free a modeset identifier
* drm_mode_object_put - free an identifier
* @dev: DRM device |
* @object: object to free |
* @id: ID to free |
* |
* LOCKING: |
* Caller must hold DRM mode_config lock. |
* |
* Free @id from @dev's unique identifier pool. |
*/ |
static void drm_mode_object_put(struct drm_device *dev, |
298,24 → 260,11 |
mutex_unlock(&dev->mode_config.idr_mutex); |
} |
/** |
* drm_mode_object_find - look up a drm object with static lifetime |
* @dev: drm device |
* @id: id of the mode object |
* @type: type of the mode object |
* |
* Note that framebuffers cannot be looked up with this function - since those
* are reference counted, they need special treatment. |
*/ |
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev, |
uint32_t id, uint32_t type) |
{ |
struct drm_mode_object *obj = NULL; |
/* Framebuffers are reference counted and need their own lookup |
* function. */
WARN_ON(type == DRM_MODE_OBJECT_FB); |
mutex_lock(&dev->mode_config.idr_mutex); |
obj = idr_find(&dev->mode_config.crtc_idr, id); |
if (!obj || (obj->type != type) || (obj->id != id)) |
329,18 → 278,13 |
/** |
* drm_framebuffer_init - initialize a framebuffer |
* @dev: DRM device |
* @fb: framebuffer to be initialized |
* @funcs: ... with these functions |
* |
* LOCKING: |
* Caller must hold mode config lock. |
* |
* Allocates an ID for the framebuffer's parent mode object, sets its mode |
* functions & device file and adds it to the master fd list. |
* |
* IMPORTANT: |
* This function publishes the fb and makes it available for concurrent access
* by other users, which means that by this point the fb _must_ be fully set up -
* since all the fb attributes are invariant over its lifetime, no further |
* locking but only correct reference counting is required. |
* |
* RETURNS: |
* Zero on success, error code on failure. |
*/ |
349,242 → 293,49 |
{ |
int ret; |
mutex_lock(&dev->mode_config.fb_lock); |
kref_init(&fb->refcount); |
INIT_LIST_HEAD(&fb->filp_head); |
fb->dev = dev; |
fb->funcs = funcs; |
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); |
if (ret) |
goto out; |
return ret; |
/* Grab the idr reference. */ |
drm_framebuffer_reference(fb); |
fb->dev = dev; |
fb->funcs = funcs; |
dev->mode_config.num_fb++; |
list_add(&fb->head, &dev->mode_config.fb_list); |
out: |
mutex_unlock(&dev->mode_config.fb_lock); |
return 0; |
} |
EXPORT_SYMBOL(drm_framebuffer_init); |
static void drm_framebuffer_free(struct kref *kref) |
{ |
struct drm_framebuffer *fb = |
container_of(kref, struct drm_framebuffer, refcount); |
fb->funcs->destroy(fb); |
} |
static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev, |
uint32_t id) |
{ |
struct drm_mode_object *obj = NULL; |
struct drm_framebuffer *fb; |
mutex_lock(&dev->mode_config.idr_mutex); |
obj = idr_find(&dev->mode_config.crtc_idr, id); |
if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id)) |
fb = NULL; |
else |
fb = obj_to_fb(obj); |
mutex_unlock(&dev->mode_config.idr_mutex); |
return fb; |
} |
/** |
* drm_framebuffer_lookup - look up a drm framebuffer and grab a reference |
* @dev: drm device |
* @id: id of the fb object |
* |
* If successful, this grabs an additional reference to the framebuffer - |
* callers need to make sure to eventually unreference the returned framebuffer |
* again. |
*/ |
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev, |
uint32_t id) |
{ |
struct drm_framebuffer *fb; |
mutex_lock(&dev->mode_config.fb_lock); |
fb = __drm_framebuffer_lookup(dev, id); |
if (fb) |
kref_get(&fb->refcount); |
mutex_unlock(&dev->mode_config.fb_lock); |
return fb; |
} |
EXPORT_SYMBOL(drm_framebuffer_lookup); |
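Because a successful drm_framebuffer_lookup() returns with an extra reference held, every caller must pair it with an unreference once done; a minimal sketch:

struct drm_framebuffer *fb;

fb = drm_framebuffer_lookup(dev, fb_id);
if (!fb)
	return -EINVAL;
/* ... use fb; its attributes are invariant over its lifetime ... */
drm_framebuffer_unreference(fb);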
/** |
* drm_framebuffer_unreference - unref a framebuffer |
* @fb: framebuffer to unref |
* |
* This function decrements the fb's refcount and frees it if it drops to zero.
*/ |
void drm_framebuffer_unreference(struct drm_framebuffer *fb) |
{ |
DRM_DEBUG("FB ID: %d\n", fb->base.id); |
kref_put(&fb->refcount, drm_framebuffer_free); |
} |
EXPORT_SYMBOL(drm_framebuffer_unreference); |
/** |
* drm_framebuffer_reference - incr the fb refcnt |
* @fb: framebuffer |
*/ |
void drm_framebuffer_reference(struct drm_framebuffer *fb) |
{ |
DRM_DEBUG("FB ID: %d\n", fb->base.id); |
kref_get(&fb->refcount); |
} |
EXPORT_SYMBOL(drm_framebuffer_reference); |
static void drm_framebuffer_free_bug(struct kref *kref) |
{ |
BUG(); |
} |
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb) |
{ |
DRM_DEBUG("FB ID: %d\n", fb->base.id); |
kref_put(&fb->refcount, drm_framebuffer_free_bug); |
} |
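Passing a BUG() callback to kref_put() is a common kref idiom for asserting that a particular put can never drop the final reference - here the idr reference is released while the caller is known to still hold one of its own. A generic sketch of the pattern (obj is illustrative):

static void must_not_be_last(struct kref *kref)
{
	BUG();	/* this put was supposed to be non-final */
}

	kref_put(&obj->refcount, must_not_be_last);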
/* dev->mode_config.fb_lock must be held! */ |
static void __drm_framebuffer_unregister(struct drm_device *dev, |
struct drm_framebuffer *fb) |
{ |
mutex_lock(&dev->mode_config.idr_mutex); |
idr_remove(&dev->mode_config.crtc_idr, fb->base.id); |
mutex_unlock(&dev->mode_config.idr_mutex); |
fb->base.id = 0; |
__drm_framebuffer_unreference(fb); |
} |
/** |
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr |
* @fb: fb to unregister |
* |
* Drivers need to call this when cleaning up driver-private framebuffers, e.g. |
* those used for fbdev. Note that the caller must hold a reference of its own,
* i.e. the object may not be destroyed through this call (since it'll lead to a |
* locking inversion). |
*/ |
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb) |
{ |
struct drm_device *dev = fb->dev; |
mutex_lock(&dev->mode_config.fb_lock); |
/* Mark fb as reaped and drop idr ref. */ |
__drm_framebuffer_unregister(dev, fb); |
mutex_unlock(&dev->mode_config.fb_lock); |
} |
EXPORT_SYMBOL(drm_framebuffer_unregister_private); |
/** |
* drm_framebuffer_cleanup - remove a framebuffer object |
* @fb: framebuffer to remove |
* |
* Cleanup references to a user-created framebuffer. This function is intended |
* to be used from the drivers ->destroy callback. |
* LOCKING: |
* Caller must hold mode config lock. |
* |
* Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on |
* the id and get back -EINVAL. Obviously no concern at driver unload time. |
* |
* Also, the framebuffer will not be removed from the lookup idr - for |
* user-created framebuffers this will happen in the rmfb ioctl. For
* driver-private objects (e.g. for fbdev) drivers need to explicitly call |
* drm_framebuffer_unregister_private. |
* Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes |
* it, setting it to NULL. |
*/ |
void drm_framebuffer_cleanup(struct drm_framebuffer *fb) |
{ |
struct drm_device *dev = fb->dev; |
mutex_lock(&dev->mode_config.fb_lock); |
/* |
* This could be moved to drm_framebuffer_remove(), but for |
* debugging it is nice to keep around the list of fb's that are
* no longer associated w/ a drm_file but are not unreferenced |
* yet. (i915 and omapdrm have debugfs files which will show |
* this.) |
*/ |
drm_mode_object_put(dev, &fb->base); |
list_del(&fb->head); |
dev->mode_config.num_fb--; |
mutex_unlock(&dev->mode_config.fb_lock); |
} |
EXPORT_SYMBOL(drm_framebuffer_cleanup); |
/** |
* drm_framebuffer_remove - remove and unreference a framebuffer object |
* @fb: framebuffer to remove |
* |
* Scans all the CRTCs and planes in @dev's mode_config. If they're |
* using @fb, removes it, setting it to NULL. Then drops the reference to the |
* passed-in framebuffer. Might take the modeset locks. |
* |
* Note that this function optimizes the cleanup away if the caller holds the |
* last reference to the framebuffer. It is also guaranteed to not take the |
* modeset locks in this case. |
*/ |
void drm_framebuffer_remove(struct drm_framebuffer *fb) |
{ |
struct drm_device *dev = fb->dev; |
struct drm_crtc *crtc; |
struct drm_plane *plane; |
struct drm_mode_set set; |
int ret; |
WARN_ON(!list_empty(&fb->filp_head)); |
/* |
* drm ABI mandates that we remove any deleted framebuffers from active |
* usage. But since most sane clients only remove framebuffers they no
* longer need, try to optimize this away. |
* |
* Since we're holding a reference ourselves, observing a refcount of 1 |
* means that we're the last holder and can skip it. Also, the refcount |
* can never increase from 1 again, so we don't need any barriers or |
* locks. |
* |
* Note that userspace could try to race with us and instate a new
* usage _after_ we've cleared all current ones. End result will be an |
* in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot |
* in this manner. |
*/ |
if (atomic_read(&fb->refcount.refcount) > 1) { |
drm_modeset_lock_all(dev); |
/* remove from any CRTC */ |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if (crtc->fb == fb) { |
/* should turn off the crtc */ |
memset(&set, 0, sizeof(struct drm_mode_set)); |
set.crtc = crtc; |
set.fb = NULL; |
ret = drm_mode_set_config_internal(&set); |
if (ret) |
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); |
} |
} |
list_for_each_entry(plane, &dev->mode_config.plane_list, head) { |
if (plane->fb == fb) { |
/* should turn off the crtc */ |
ret = plane->funcs->disable_plane(plane); |
if (ret) |
DRM_ERROR("failed to disable plane with busy fb\n"); |
/* disconnect the plane from the fb and crtc: */ |
__drm_framebuffer_unreference(plane->fb); |
plane->fb = NULL; |
plane->crtc = NULL; |
} |
} |
drm_modeset_unlock_all(dev); |
} |
drm_framebuffer_unreference(fb); |
} |
EXPORT_SYMBOL(drm_framebuffer_remove); |
/** |
* drm_crtc_init - Initialise a new CRTC object |
* @dev: DRM device |
591,6 → 342,9 |
* @crtc: CRTC object to init |
* @funcs: callbacks for the new CRTC |
* |
* LOCKING: |
* Takes mode_config lock. |
* |
* Inits a new object created as the base part of a driver crtc object.
* |
* RETURNS: |
605,9 → 359,7 |
crtc->funcs = funcs; |
crtc->invert_dimensions = false; |
drm_modeset_lock_all(dev); |
mutex_init(&crtc->mutex); |
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex); |
mutex_lock(&dev->mode_config.mutex); |
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); |
if (ret) |
619,7 → 371,7 |
dev->mode_config.num_crtc++; |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
629,6 → 381,9 |
* drm_crtc_cleanup - Cleans up the core crtc usage. |
* @crtc: CRTC to cleanup |
* |
* LOCKING: |
* Caller must hold mode config lock. |
* |
* Cleanup @crtc. Removes it from the drm modesetting space;
* does NOT free the object - the caller does that.
*/ |
650,6 → 405,9 |
* @connector: connector the new mode |
* @mode: mode data |
* |
* LOCKING: |
* Caller must hold mode config lock. |
* |
* Add @mode to @connector's mode list for later use. |
*/ |
void drm_mode_probed_add(struct drm_connector *connector, |
664,6 → 422,9 |
* @connector: connector list to modify |
* @mode: mode to remove |
* |
* LOCKING: |
* Caller must hold mode config lock. |
* |
* Remove @mode from @connector's mode list, then free it. |
*/ |
void drm_mode_remove(struct drm_connector *connector, |
679,8 → 440,11 |
* @dev: DRM device |
* @connector: the connector to init |
* @funcs: callbacks for this connector |
* @connector_type: user visible type of the connector |
* @name: user visible name of the connector |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Initialises a preallocated connector. Connectors should be |
* subclassed as part of driver connector objects. |
* |
694,7 → 458,7 |
{ |
int ret; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR); |
if (ret) |
724,7 → 488,7 |
dev->mode_config.dpms_property, 0); |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
734,6 → 498,9 |
* drm_connector_cleanup - cleans up an initialised connector |
* @connector: connector to cleanup |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Cleans up the connector but doesn't free the object. |
*/ |
void drm_connector_cleanup(struct drm_connector *connector) |
750,9 → 517,11 |
list_for_each_entry_safe(mode, t, &connector->user_modes, head) |
drm_mode_remove(connector, mode); |
mutex_lock(&dev->mode_config.mutex); |
drm_mode_object_put(dev, &connector->base); |
list_del(&connector->head); |
dev->mode_config.num_connector--; |
mutex_unlock(&dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_connector_cleanup); |
774,7 → 543,7 |
{ |
int ret; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER); |
if (ret) |
788,7 → 557,7 |
dev->mode_config.num_encoder++; |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
797,11 → 566,11 |
void drm_encoder_cleanup(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
drm_mode_object_put(dev, &encoder->base); |
list_del(&encoder->head); |
dev->mode_config.num_encoder--; |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_encoder_cleanup); |
813,7 → 582,7 |
{ |
int ret; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE); |
if (ret) |
847,7 → 616,7 |
} |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
857,7 → 626,7 |
{ |
struct drm_device *dev = plane->dev; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
kfree(plane->format_types); |
drm_mode_object_put(dev, &plane->base); |
/* if not added to a list, it must be a private plane */ |
865,7 → 634,7 |
list_del(&plane->head); |
dev->mode_config.num_plane--; |
} |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_plane_cleanup); |
873,6 → 642,9 |
* drm_mode_create - create a new display mode |
* @dev: DRM device |
* |
* LOCKING: |
* Caller must hold DRM mode_config lock. |
* |
* Create a new drm_display_mode, give it an ID, and return it. |
* |
* RETURNS: |
900,6 → 672,9 |
* @dev: DRM device |
* @mode: mode to remove |
* |
* LOCKING: |
* Caller must hold mode config lock. |
* |
* Free @mode's unique identifier, then free it. |
*/ |
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode) |
1124,19 → 899,16 |
* drm_mode_config_init - initialize DRM mode_configuration structure |
* @dev: DRM device |
* |
* LOCKING: |
* None, should happen single threaded at init time. |
* |
* Initialize @dev's mode_config structure, used for tracking the graphics |
* configuration of @dev. |
* |
* Since this initializes the modeset locks, no locking is possible, which is no
* problem since this should happen single-threaded at init time. It is the
* driver's job to ensure this guarantee.
* |
*/ |
void drm_mode_config_init(struct drm_device *dev) |
{ |
mutex_init(&dev->mode_config.mutex); |
mutex_init(&dev->mode_config.idr_mutex); |
mutex_init(&dev->mode_config.fb_lock); |
INIT_LIST_HEAD(&dev->mode_config.fb_list); |
INIT_LIST_HEAD(&dev->mode_config.crtc_list); |
INIT_LIST_HEAD(&dev->mode_config.connector_list); |
1146,9 → 918,9 |
INIT_LIST_HEAD(&dev->mode_config.plane_list); |
idr_init(&dev->mode_config.crtc_idr); |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
drm_mode_create_standard_connector_properties(dev); |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
/* Just to be sure */ |
dev->mode_config.num_fb = 0; |
1206,13 → 978,12 |
* drm_mode_config_cleanup - free up DRM mode_config info |
* @dev: DRM device |
* |
* LOCKING: |
* Caller must hold mode config lock. |
* |
* Free up all the connectors and CRTCs associated with this DRM device, then |
* free up the framebuffers and associated buffer objects. |
* |
* Note that since this /should/ happen single-threaded at driver/device |
* teardown time, no locking is required. It's the driver's job to ensure that |
* this guarantee actually holds true. |
* |
* FIXME: cleanup any dangling user buffer objects too |
*/ |
void drm_mode_config_cleanup(struct drm_device *dev) |
1239,19 → 1010,6 |
drm_property_destroy(dev, property); |
} |
/* |
* Single-threaded teardown context, so it's not required to grab the |
* fb_lock to protect against concurrent fb_list access. Contrary, it |
* would actually deadlock with the drm_framebuffer_cleanup function. |
* |
* Also, if there are any framebuffers left, that's a driver leak now, |
* so politely WARN about this. |
*/ |
WARN_ON(!list_empty(&dev->mode_config.fb_list)); |
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) { |
drm_framebuffer_remove(fb); |
} |
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list, |
head) { |
plane->funcs->destroy(plane); |
1261,6 → 1019,7 |
crtc->funcs->destroy(crtc); |
} |
idr_remove_all(&dev->mode_config.crtc_idr); |
idr_destroy(&dev->mode_config.crtc_idr); |
} |
EXPORT_SYMBOL(drm_mode_config_cleanup); |
1270,6 → 1029,9 |
* @out: drm_mode_modeinfo struct to return to the user |
* @in: drm_display_mode to use |
* |
* LOCKING: |
* None. |
* |
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to |
* the user. |
*/ |
1306,6 → 1068,9 |
* @out: drm_display_mode to return to the user |
* @in: drm_mode_modeinfo to use |
* |
* LOCKING: |
* None. |
* |
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to |
* the caller. |
* |
1342,10 → 1107,14 |
#if 0 |
/** |
* drm_mode_getresources - get graphics configuration |
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Construct a set of configuration description structures and return |
* them to the user, including CRTC, connector and framebuffer configuration. |
* |
1378,8 → 1147,8 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
mutex_lock(&dev->mode_config.mutex); |
mutex_lock(&file_priv->fbs_lock); |
/* |
* For the non-control nodes we need to limit the list of resources |
* by IDs in the group list for this node |
1387,23 → 1156,6 |
list_for_each(lh, &file_priv->fbs) |
fb_count++; |
/* handle this in 4 parts */ |
/* FBs */ |
if (card_res->count_fbs >= fb_count) { |
copied = 0; |
fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; |
list_for_each_entry(fb, &file_priv->fbs, filp_head) { |
if (put_user(fb->base.id, fb_id + copied)) { |
mutex_unlock(&file_priv->fbs_lock); |
return -EFAULT; |
} |
copied++; |
} |
} |
card_res->count_fbs = fb_count; |
mutex_unlock(&file_priv->fbs_lock); |
drm_modeset_lock_all(dev); |
mode_group = &file_priv->master->minor->mode_group; |
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { |
1427,6 → 1179,21 |
card_res->max_width = dev->mode_config.max_width; |
card_res->min_width = dev->mode_config.min_width; |
/* handle this in 4 parts */ |
/* FBs */ |
if (card_res->count_fbs >= fb_count) { |
copied = 0; |
fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; |
list_for_each_entry(fb, &file_priv->fbs, filp_head) { |
if (put_user(fb->base.id, fb_id + copied)) { |
ret = -EFAULT; |
goto out; |
} |
copied++; |
} |
} |
card_res->count_fbs = fb_count; |
/* CRTCs */ |
if (card_res->count_crtcs >= crtc_count) { |
copied = 0; |
1522,16 → 1289,20 |
card_res->count_connectors, card_res->count_encoders); |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
/** |
* drm_mode_getcrtc - get CRTC configuration |
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Construct a CRTC configuration structure to return to the user. |
* |
* Called by the user via ioctl. |
1550,7 → 1321,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, crtc_resp->crtc_id, |
DRM_MODE_OBJECT_CRTC); |
1578,16 → 1349,20 |
} |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
/** |
* drm_mode_getconnector - get connector configuration |
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Construct a connector configuration structure to return to the user. |
* |
* Called by the user via ioctl. |
1719,7 → 1494,6 |
out: |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
1734,7 → 1508,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, enc_resp->encoder_id, |
DRM_MODE_OBJECT_ENCODER); |
if (!obj) { |
1753,7 → 1527,7 |
enc_resp->possible_clones = encoder->possible_clones; |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
1763,6 → 1537,9 |
* @data: ioctl data |
* @file_priv: DRM file info |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Return a plane count and set of IDs.
*/ |
int drm_mode_getplane_res(struct drm_device *dev, void *data, |
1777,7 → 1554,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
config = &dev->mode_config; |
/* |
1799,7 → 1576,7 |
plane_resp->count_planes = config->num_plane; |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
1809,6 → 1586,9 |
* @data: ioctl data |
* @file_priv: DRM file info |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Return plane info, including formats supported, gamma size, any |
* current fb, etc. |
*/ |
1824,7 → 1604,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, plane_resp->plane_id, |
DRM_MODE_OBJECT_PLANE); |
if (!obj) { |
1864,7 → 1644,7 |
plane_resp->count_format_types = plane->format_count; |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
1872,8 → 1652,11 |
* drm_mode_setplane - set up or tear down a plane
* @dev: DRM device
* @data: ioctl data
* @file_priv: DRM file info
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Set plane info, including placement, fb, scaling, and other factors. |
* Or pass a NULL fb to disable. |
*/ |
1884,7 → 1667,7 |
struct drm_mode_object *obj; |
struct drm_plane *plane; |
struct drm_crtc *crtc; |
struct drm_framebuffer *fb = NULL, *old_fb = NULL; |
struct drm_framebuffer *fb; |
int ret = 0; |
unsigned int fb_width, fb_height; |
int i; |
1892,6 → 1675,8 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
mutex_lock(&dev->mode_config.mutex); |
/* |
* First, find the plane, crtc, and fb objects. If not available, |
* we don't bother to call the driver. |
1901,18 → 1686,16 |
if (!obj) { |
DRM_DEBUG_KMS("Unknown plane ID %d\n", |
plane_req->plane_id); |
return -ENOENT; |
ret = -ENOENT; |
goto out; |
} |
plane = obj_to_plane(obj); |
/* No fb means shut it down */ |
if (!plane_req->fb_id) { |
drm_modeset_lock_all(dev); |
old_fb = plane->fb; |
plane->funcs->disable_plane(plane); |
plane->crtc = NULL; |
plane->fb = NULL; |
drm_modeset_unlock_all(dev); |
goto out; |
} |
1926,13 → 1709,15 |
} |
crtc = obj_to_crtc(obj); |
fb = drm_framebuffer_lookup(dev, plane_req->fb_id); |
if (!fb) { |
obj = drm_mode_object_find(dev, plane_req->fb_id, |
DRM_MODE_OBJECT_FB); |
if (!obj) { |
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n", |
plane_req->fb_id); |
ret = -ENOENT; |
goto out; |
} |
fb = obj_to_fb(obj); |
/* Check whether this plane supports the fb pixel format. */ |
for (i = 0; i < plane->format_count; i++) |
1978,7 → 1763,6 |
goto out; |
} |
drm_modeset_lock_all(dev); |
ret = plane->funcs->update_plane(plane, crtc, fb, |
plane_req->crtc_x, plane_req->crtc_y, |
plane_req->crtc_w, plane_req->crtc_h, |
1985,58 → 1769,26 |
plane_req->src_x, plane_req->src_y, |
plane_req->src_w, plane_req->src_h); |
if (!ret) { |
old_fb = plane->fb; |
plane->crtc = crtc; |
plane->fb = fb; |
fb = NULL; |
} |
drm_modeset_unlock_all(dev); |
out: |
if (fb) |
drm_framebuffer_unreference(fb); |
if (old_fb) |
drm_framebuffer_unreference(old_fb); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
/** |
* drm_mode_set_config_internal - helper to call ->set_config |
* @set: modeset config to set |
* |
* This is a little helper to wrap internal calls to the ->set_config driver |
* interface. The only thing it adds is the correct refcounting dance.
*/ |
int drm_mode_set_config_internal(struct drm_mode_set *set) |
{ |
struct drm_crtc *crtc = set->crtc; |
struct drm_framebuffer *fb, *old_fb; |
int ret; |
old_fb = crtc->fb; |
fb = set->fb; |
ret = crtc->funcs->set_config(set); |
if (ret == 0) { |
if (old_fb) |
drm_framebuffer_unreference(old_fb); |
if (fb) |
drm_framebuffer_reference(fb); |
} |
return ret; |
} |
EXPORT_SYMBOL(drm_mode_set_config_internal); |
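The wrapper exists purely for that refcounting dance: the incoming fb gains a reference only when the driver's ->set_config succeeds, and the displaced fb loses one, so callers route every configuration change through it rather than calling the hook directly. A minimal sketch:

struct drm_mode_set set;

memset(&set, 0, sizeof(set));
set.crtc = crtc;
set.fb = NULL;		/* a NULL fb turns the crtc off */
ret = drm_mode_set_config_internal(&set);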
#if 0 |
/** |
* drm_mode_setcrtc - set CRTC configuration |
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Build a new CRTC configuration based on user request. |
* |
* Called by the user via ioctl. |
2066,7 → 1818,7 |
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX) |
return -ERANGE; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, crtc_req->crtc_id, |
DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
2088,16 → 1840,16 |
goto out; |
} |
fb = crtc->fb; |
/* Make refcounting symmetric with the lookup path. */ |
drm_framebuffer_reference(fb); |
} else { |
fb = drm_framebuffer_lookup(dev, crtc_req->fb_id); |
if (!fb) { |
obj = drm_mode_object_find(dev, crtc_req->fb_id, |
DRM_MODE_OBJECT_FB); |
if (!obj) { |
DRM_DEBUG_KMS("Unknown FB ID %d\n",
crtc_req->fb_id); |
ret = -EINVAL; |
goto out; |
} |
fb = obj_to_fb(obj); |
} |
mode = drm_mode_create(dev); |
2194,15 → 1946,12 |
set.connectors = connector_set; |
set.num_connectors = crtc_req->count_connectors; |
set.fb = fb; |
ret = drm_mode_set_config_internal(&set); |
ret = crtc->funcs->set_config(&set); |
out: |
if (fb) |
drm_framebuffer_unreference(fb); |
kfree(connector_set); |
drm_mode_destroy(dev, mode); |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
2220,14 → 1969,15 |
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) |
return -EINVAL; |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); |
return -EINVAL; |
ret = -EINVAL; |
goto out; |
} |
crtc = obj_to_crtc(obj); |
mutex_lock(&crtc->mutex); |
if (req->flags & DRM_MODE_CURSOR_BO) { |
if (!crtc->funcs->cursor_set) { |
ret = -ENXIO; |
2247,8 → 1997,7 |
} |
} |
out: |
mutex_unlock(&crtc->mutex); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
2259,7 → 2008,7 |
switch (bpp) { |
case 8: |
fmt = DRM_FORMAT_C8; |
fmt = DRM_FORMAT_RGB332; |
break; |
case 16: |
if (depth == 15) |
2290,10 → 2039,14 |
#if 0 |
/** |
* drm_mode_addfb - add an FB to the graphics configuration |
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Add a new FB to the specified CRTC, given a user request. |
* |
* Called by the user via ioctl. |
2327,18 → 2080,24 |
if ((config->min_height > r.height) || (r.height > config->max_height)) |
return -EINVAL; |
mutex_lock(&dev->mode_config.mutex); |
/* TODO check buffer is sufficiently large */ |
/* TODO setup destructor callback */ |
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); |
if (IS_ERR(fb)) { |
DRM_DEBUG_KMS("could not create framebuffer\n"); |
return PTR_ERR(fb); |
ret = PTR_ERR(fb); |
goto out; |
} |
mutex_lock(&file_priv->fbs_lock); |
or->fb_id = fb->base.id; |
list_add(&fb->filp_head, &file_priv->fbs); |
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); |
mutex_unlock(&file_priv->fbs_lock); |
out: |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
2464,10 → 2223,14 |
/** |
* drm_mode_addfb2 - add an FB to the graphics configuration |
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Add a new FB to the specified CRTC, given a user request with format. |
* |
* Called by the user via ioctl. |
2506,28 → 2269,34 |
if (ret) |
return ret; |
mutex_lock(&dev->mode_config.mutex); |
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); |
if (IS_ERR(fb)) { |
DRM_DEBUG_KMS("could not create framebuffer\n"); |
return PTR_ERR(fb); |
ret = PTR_ERR(fb); |
goto out; |
} |
mutex_lock(&file_priv->fbs_lock); |
r->fb_id = fb->base.id; |
list_add(&fb->filp_head, &file_priv->fbs); |
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); |
mutex_unlock(&file_priv->fbs_lock); |
out: |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
/** |
* drm_mode_rmfb - remove an FB from the configuration |
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Remove the FB specified by the user. |
* |
* Called by the user via ioctl. |
2538,50 → 2307,51 |
int drm_mode_rmfb(struct drm_device *dev, |
void *data, struct drm_file *file_priv) |
{ |
struct drm_mode_object *obj; |
struct drm_framebuffer *fb = NULL; |
struct drm_framebuffer *fbl = NULL; |
uint32_t *id = data; |
int ret = 0; |
int found = 0; |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
mutex_lock(&file_priv->fbs_lock); |
mutex_lock(&dev->mode_config.fb_lock); |
fb = __drm_framebuffer_lookup(dev, *id); |
if (!fb) |
goto fail_lookup; |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB); |
/* TODO check that we really get a framebuffer back. */ |
if (!obj) { |
ret = -EINVAL; |
goto out; |
} |
fb = obj_to_fb(obj); |
list_for_each_entry(fbl, &file_priv->fbs, filp_head) |
if (fb == fbl) |
found = 1; |
if (!found) |
goto fail_lookup; |
/* Mark fb as reaped, we still have a ref from fpriv->fbs. */ |
__drm_framebuffer_unregister(dev, fb); |
if (!found) { |
ret = -EINVAL; |
goto out; |
} |
list_del_init(&fb->filp_head); |
mutex_unlock(&dev->mode_config.fb_lock); |
mutex_unlock(&file_priv->fbs_lock); |
drm_framebuffer_remove(fb); |
return 0; |
fail_lookup: |
mutex_unlock(&dev->mode_config.fb_lock); |
mutex_unlock(&file_priv->fbs_lock); |
return -EINVAL; |
out: |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
/** |
* drm_mode_getfb - get FB info |
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Lookup the FB given its ID and return info about it. |
* |
* Called by the user via ioctl. |
2593,15 → 2363,20 |
void *data, struct drm_file *file_priv) |
{ |
struct drm_mode_fb_cmd *r = data; |
struct drm_mode_object *obj; |
struct drm_framebuffer *fb; |
int ret; |
int ret = 0; |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
fb = drm_framebuffer_lookup(dev, r->fb_id); |
if (!fb) |
return -EINVAL; |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); |
if (!obj) { |
ret = -EINVAL; |
goto out; |
} |
fb = obj_to_fb(obj); |
r->height = fb->height; |
r->width = fb->width; |
2608,13 → 2383,10 |
r->depth = fb->depth; |
r->bpp = fb->bits_per_pixel; |
r->pitch = fb->pitches[0]; |
if (fb->funcs->create_handle) |
ret = fb->funcs->create_handle(fb, file_priv, &r->handle); |
else |
ret = -ENODEV; |
fb->funcs->create_handle(fb, file_priv, &r->handle); |
drm_framebuffer_unreference(fb); |
out: |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
2624,6 → 2396,7 |
struct drm_clip_rect __user *clips_ptr; |
struct drm_clip_rect *clips = NULL; |
struct drm_mode_fb_dirty_cmd *r = data; |
struct drm_mode_object *obj; |
struct drm_framebuffer *fb; |
unsigned flags; |
int num_clips; |
2632,9 → 2405,13 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
fb = drm_framebuffer_lookup(dev, r->fb_id); |
if (!fb) |
return -EINVAL; |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB); |
if (!obj) { |
ret = -EINVAL; |
goto out_err1; |
} |
fb = obj_to_fb(obj); |
num_clips = r->num_clips; |
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr; |
2672,19 → 2449,17 |
} |
if (fb->funcs->dirty) { |
drm_modeset_lock_all(dev); |
ret = fb->funcs->dirty(fb, file_priv, flags, r->color, |
clips, num_clips); |
drm_modeset_unlock_all(dev); |
} else { |
ret = -ENOSYS; |
goto out_err2; |
} |
out_err2: |
kfree(clips); |
out_err1: |
drm_framebuffer_unreference(fb); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
2691,8 → 2466,11 |
/** |
* drm_fb_release - remove and free the FBs on this file |
* @priv: drm file for the ioctl |
* @filp: file * from the ioctl |
* |
* LOCKING: |
* Takes mode config lock. |
* |
* Destroy all the FBs associated with @filp. |
* |
* Called by the user via ioctl. |
2705,20 → 2483,11 |
struct drm_device *dev = priv->minor->dev; |
struct drm_framebuffer *fb, *tfb; |
mutex_lock(&priv->fbs_lock); |
mutex_lock(&dev->mode_config.mutex); |
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) { |
mutex_lock(&dev->mode_config.fb_lock); |
/* Mark fb as reaped, we still have a ref from fpriv->fbs. */ |
__drm_framebuffer_unregister(dev, fb); |
mutex_unlock(&dev->mode_config.fb_lock); |
list_del_init(&fb->filp_head); |
/* This will also drop the fpriv->fbs reference. */ |
drm_framebuffer_remove(fb); |
} |
mutex_unlock(&priv->fbs_lock); |
mutex_unlock(&dev->mode_config.mutex); |
} |
#endif |
2813,9 → 2582,10 |
/** |
* drm_fb_attachmode - Attach a user mode to a connector
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* This attaches a user-specified mode to a connector.
* Called by the user via ioctl. |
2836,7 → 2606,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); |
if (!obj) { |
2860,7 → 2630,7 |
drm_mode_attachmode(dev, connector, mode); |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
2867,9 → 2637,10 |
/** |
* drm_fb_detachmode - Detach a user-specified mode from a connector
* @dev: drm device for the ioctl |
* @data: data pointer for the ioctl |
* @file_priv: drm file for the ioctl call |
* @inode: inode from the ioctl |
* @filp: file * from the ioctl |
* @cmd: cmd from ioctl |
* @arg: arg from ioctl |
* |
* Called by the user via ioctl. |
* |
2889,7 → 2660,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR); |
if (!obj) { |
2906,7 → 2677,7 |
ret = drm_mode_detachmode(dev, connector, &mode); |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
3154,7 → 2925,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); |
if (!obj) { |
ret = -EINVAL; |
3232,7 → 3003,7 |
out_resp->count_enum_blobs = blob_count; |
} |
done: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
3285,7 → 3056,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB); |
if (!obj) { |
ret = -EINVAL; |
3303,7 → 3074,7 |
out_resp->length = blob->length; |
done: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
3406,7 → 3177,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); |
if (!obj) { |
3443,7 → 3214,7 |
} |
arg->count_props = props_count; |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
3460,7 → 3231,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type); |
if (!arg_obj) |
3498,7 → 3269,7 |
} |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
3562,7 → 3333,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
ret = -EINVAL; |
3603,7 → 3374,7 |
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
3621,7 → 3392,7 |
if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
return -EINVAL; |
drm_modeset_lock_all(dev); |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
ret = -EINVAL; |
3654,7 → 3425,7 |
goto out; |
} |
out: |
drm_modeset_unlock_all(dev); |
mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
3691,7 → 3462,6 |
int *bpp) |
{ |
switch (format) { |
case DRM_FORMAT_C8: |
case DRM_FORMAT_RGB332: |
case DRM_FORMAT_BGR233: |
*depth = 8; |
/drivers/video/drm/drm_edid.c |
---|
29,11 → 29,11 |
*/ |
#include <linux/kernel.h> |
#include <linux/slab.h> |
#include <linux/hdmi.h> |
#include <linux/i2c.h> |
#include <linux/module.h> |
#include <drm/drmP.h> |
#include <drm/drm_edid.h> |
#include "drm_edid_modes.h" |
#define version_greater(edid, maj, min) \ |
(((edid)->version > (maj)) || \ |
87,6 → 87,9 |
int product_id; |
u32 quirks; |
} edid_quirk_list[] = { |
/* ASUS VW222S */ |
{ "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING }, |
/* Acer AL1706 */ |
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, |
/* Acer F51 */ |
127,746 → 130,6 |
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING }, |
}; |
/* |
* Autogenerated from the DMT spec. |
* This table is copied from xfree86/modes/xf86EdidModes.c. |
*/ |
static const struct drm_display_mode drm_dmt_modes[] = { |
/* 640x350@85Hz */ |
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, |
736, 832, 0, 350, 382, 385, 445, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 640x400@85Hz */ |
{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, |
736, 832, 0, 400, 401, 404, 445, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 720x400@85Hz */ |
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756, |
828, 936, 0, 400, 401, 404, 446, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 640x480@60Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, |
752, 800, 0, 480, 489, 492, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 640x480@72Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, |
704, 832, 0, 480, 489, 492, 520, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 640x480@75Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, |
720, 840, 0, 480, 481, 484, 500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 640x480@85Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696, |
752, 832, 0, 480, 481, 484, 509, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 800x600@56Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, |
896, 1024, 0, 600, 601, 603, 625, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@60Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, |
968, 1056, 0, 600, 601, 605, 628, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@72Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, |
976, 1040, 0, 600, 637, 643, 666, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@75Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, |
896, 1056, 0, 600, 601, 604, 625, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@85Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832, |
896, 1048, 0, 600, 601, 604, 631, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@120Hz RB */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848, |
880, 960, 0, 600, 603, 607, 636, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 848x480@60Hz */ |
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864, |
976, 1088, 0, 480, 486, 494, 517, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1024x768@43Hz, interlace */ |
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, |
1208, 1264, 0, 768, 768, 772, 817, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 1024x768@60Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, |
1184, 1344, 0, 768, 771, 777, 806, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1024x768@70Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, |
1184, 1328, 0, 768, 771, 777, 806, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1024x768@75Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040, |
1136, 1312, 0, 768, 769, 772, 800, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1024x768@85Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, |
1168, 1376, 0, 768, 769, 772, 808, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1024x768@120Hz RB */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072, |
1104, 1184, 0, 768, 771, 775, 813, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1152x864@75Hz */ |
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, |
1344, 1600, 0, 864, 865, 868, 900, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x768@60Hz RB */ |
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328, |
1360, 1440, 0, 768, 771, 778, 790, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x768@60Hz */ |
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, |
1472, 1664, 0, 768, 771, 778, 798, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x768@75Hz */ |
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360, |
1488, 1696, 0, 768, 771, 778, 805, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x768@85Hz */ |
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360, |
1496, 1712, 0, 768, 771, 778, 809, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x768@120Hz RB */ |
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328, |
1360, 1440, 0, 768, 771, 778, 813, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x800@60Hz RB */ |
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328, |
1360, 1440, 0, 800, 803, 809, 823, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x800@60Hz */ |
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, |
1480, 1680, 0, 800, 803, 809, 831, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x800@75Hz */ |
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360, |
1488, 1696, 0, 800, 803, 809, 838, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x800@85Hz */ |
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360, |
1496, 1712, 0, 800, 803, 809, 843, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x800@120Hz RB */ |
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328, |
1360, 1440, 0, 800, 803, 809, 847, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x960@60Hz */ |
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, |
1488, 1800, 0, 960, 961, 964, 1000, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x960@85Hz */ |
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344, |
1504, 1728, 0, 960, 961, 964, 1011, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x960@120Hz RB */ |
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328, |
1360, 1440, 0, 960, 963, 967, 1017, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x1024@60Hz */ |
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, |
1440, 1688, 0, 1024, 1025, 1028, 1066, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x1024@75Hz */ |
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, |
1440, 1688, 0, 1024, 1025, 1028, 1066, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x1024@85Hz */ |
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344, |
1504, 1728, 0, 1024, 1025, 1028, 1072, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x1024@120Hz RB */ |
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328, |
1360, 1440, 0, 1024, 1027, 1034, 1084, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1360x768@60Hz */ |
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, |
1536, 1792, 0, 768, 771, 777, 795, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1360x768@120Hz RB */ |
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408, |
1440, 1520, 0, 768, 771, 776, 813, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1400x1050@60Hz RB */ |
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448, |
1480, 1560, 0, 1050, 1053, 1057, 1080, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1400x1050@60Hz */ |
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, |
1632, 1864, 0, 1050, 1053, 1057, 1089, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1400x1050@75Hz */ |
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504, |
1648, 1896, 0, 1050, 1053, 1057, 1099, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1400x1050@85Hz */ |
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, |
1656, 1912, 0, 1050, 1053, 1057, 1105, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1400x1050@120Hz RB */ |
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448, |
1480, 1560, 0, 1050, 1053, 1057, 1112, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1440x900@60Hz RB */ |
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488, |
1520, 1600, 0, 900, 903, 909, 926, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1440x900@60Hz */ |
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, |
1672, 1904, 0, 900, 903, 909, 934, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1440x900@75Hz */ |
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536, |
1688, 1936, 0, 900, 903, 909, 942, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1440x900@85Hz */ |
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544, |
1696, 1952, 0, 900, 903, 909, 948, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1440x900@120Hz RB */ |
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488, |
1520, 1600, 0, 900, 903, 909, 953, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1600x1200@60Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@65Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@70Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@75Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@85Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@120Hz RB */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648, |
1680, 1760, 0, 1200, 1203, 1207, 1271, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1680x1050@60Hz RB */ |
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728, |
1760, 1840, 0, 1050, 1053, 1059, 1080, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1680x1050@60Hz */ |
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, |
1960, 2240, 0, 1050, 1053, 1059, 1089, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1680x1050@75Hz */ |
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800, |
1976, 2272, 0, 1050, 1053, 1059, 1099, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1680x1050@85Hz */ |
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808, |
1984, 2288, 0, 1050, 1053, 1059, 1105, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1680x1050@120Hz RB */ |
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728, |
1760, 1840, 0, 1050, 1053, 1059, 1112, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1792x1344@60Hz */ |
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, |
2120, 2448, 0, 1344, 1345, 1348, 1394, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1792x1344@75Hz */ |
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, |
2104, 2456, 0, 1344, 1345, 1348, 1417, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1792x1344@120Hz RB */ |
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840, |
1872, 1952, 0, 1344, 1347, 1351, 1423, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1856x1392@60Hz */ |
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, |
2176, 2528, 0, 1392, 1393, 1396, 1439, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1856x1392@75Hz */ |
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984, |
2208, 2560, 0, 1392, 1395, 1399, 1500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1856x1392@120Hz RB */ |
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904, |
1936, 2016, 0, 1392, 1395, 1399, 1474, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1920x1200@60Hz RB */ |
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968, |
2000, 2080, 0, 1200, 1203, 1209, 1235, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1920x1200@60Hz */ |
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, |
2256, 2592, 0, 1200, 1203, 1209, 1245, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1200@75Hz */ |
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056, |
2264, 2608, 0, 1200, 1203, 1209, 1255, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1200@85Hz */ |
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064, |
2272, 2624, 0, 1200, 1203, 1209, 1262, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1200@120Hz RB */ |
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968, |
2000, 2080, 0, 1200, 1203, 1209, 1271, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1920x1440@60Hz */ |
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, |
2256, 2600, 0, 1440, 1441, 1444, 1500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1440@75Hz */ |
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064, |
2288, 2640, 0, 1440, 1441, 1444, 1500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1440@120Hz RB */ |
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968, |
2000, 2080, 0, 1440, 1443, 1447, 1525, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 2560x1600@60Hz RB */ |
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608, |
2640, 2720, 0, 1600, 1603, 1609, 1646, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 2560x1600@60Hz */ |
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, |
3032, 3504, 0, 1600, 1603, 1609, 1658, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 2560x1600@75Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768, |
3048, 3536, 0, 1600, 1603, 1609, 1672, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 2560x1600@85Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768, |
3048, 3536, 0, 1600, 1603, 1609, 1682, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 2560x1600@120Hz RB */ |
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608, |
2640, 2720, 0, 1600, 1603, 1609, 1694, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
}; |
static const struct drm_display_mode edid_est_modes[] = { |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, |
968, 1056, 0, 600, 601, 605, 628, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, |
896, 1024, 0, 600, 601, 603, 625, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, |
720, 840, 0, 480, 481, 484, 500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, |
704, 832, 0, 480, 489, 491, 520, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704, |
768, 864, 0, 480, 483, 486, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656, |
752, 800, 0, 480, 490, 492, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */ |
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738, |
846, 900, 0, 400, 421, 423, 449, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */ |
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738, |
846, 900, 0, 400, 412, 414, 449, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */ |
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, |
1440, 1688, 0, 1024, 1025, 1028, 1066, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040, |
1136, 1312, 0, 768, 769, 772, 800, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, |
1184, 1328, 0, 768, 771, 777, 806, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, |
1184, 1344, 0, 768, 771, 777, 806, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */ |
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032, |
1208, 1264, 0, 768, 768, 776, 817, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */ |
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864, |
928, 1152, 0, 624, 625, 628, 667, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, |
896, 1056, 0, 600, 601, 604, 625, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, |
976, 1040, 0, 600, 637, 643, 666, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */ |
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, |
1344, 1600, 0, 864, 865, 868, 900, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */ |
}; |
struct minimode { |
short w; |
short h; |
short r; |
short rb; |
}; |
static const struct minimode est3_modes[] = { |
/* byte 6 */ |
{ 640, 350, 85, 0 }, |
{ 640, 400, 85, 0 }, |
{ 720, 400, 85, 0 }, |
{ 640, 480, 85, 0 }, |
{ 848, 480, 60, 0 }, |
{ 800, 600, 85, 0 }, |
{ 1024, 768, 85, 0 }, |
{ 1152, 864, 75, 0 }, |
/* byte 7 */ |
{ 1280, 768, 60, 1 }, |
{ 1280, 768, 60, 0 }, |
{ 1280, 768, 75, 0 }, |
{ 1280, 768, 85, 0 }, |
{ 1280, 960, 60, 0 }, |
{ 1280, 960, 85, 0 }, |
{ 1280, 1024, 60, 0 }, |
{ 1280, 1024, 85, 0 }, |
/* byte 8 */ |
{ 1360, 768, 60, 0 }, |
{ 1440, 900, 60, 1 }, |
{ 1440, 900, 60, 0 }, |
{ 1440, 900, 75, 0 }, |
{ 1440, 900, 85, 0 }, |
{ 1400, 1050, 60, 1 }, |
{ 1400, 1050, 60, 0 }, |
{ 1400, 1050, 75, 0 }, |
/* byte 9 */ |
{ 1400, 1050, 85, 0 }, |
{ 1680, 1050, 60, 1 }, |
{ 1680, 1050, 60, 0 }, |
{ 1680, 1050, 75, 0 }, |
{ 1680, 1050, 85, 0 }, |
{ 1600, 1200, 60, 0 }, |
{ 1600, 1200, 65, 0 }, |
{ 1600, 1200, 70, 0 }, |
/* byte 10 */ |
{ 1600, 1200, 75, 0 }, |
{ 1600, 1200, 85, 0 }, |
{ 1792, 1344, 60, 0 }, |
{ 1792, 1344, 85, 0 }, |
{ 1856, 1392, 60, 0 }, |
{ 1856, 1392, 75, 0 }, |
{ 1920, 1200, 60, 1 }, |
{ 1920, 1200, 60, 0 }, |
/* byte 11 */ |
{ 1920, 1200, 75, 0 }, |
{ 1920, 1200, 85, 0 }, |
{ 1920, 1440, 60, 0 }, |
{ 1920, 1440, 75, 0 }, |
}; |
static const struct minimode extra_modes[] = { |
{ 1024, 576, 60, 0 }, |
{ 1366, 768, 60, 0 }, |
{ 1600, 900, 60, 0 }, |
{ 1680, 945, 60, 0 }, |
{ 1920, 1080, 60, 0 }, |
{ 2048, 1152, 60, 0 }, |
{ 2048, 1536, 60, 0 }, |
}; |
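/*
 * Context for the two tables above: minimode entries are not full timings;
 * each is expanded on demand into a drm_display_mode via the GTF/CVT
 * helpers, as add_extra_modes() does further below. A minimal sketch of
 * that expansion (illustrative only, using the drm_gtf_mode() call shape
 * seen later in this file):
 */
#if 0
static struct drm_display_mode *
minimode_to_mode(struct drm_device *dev, const struct minimode *m)
{
	/* width, height, refresh; no interlace, no margins */
	return drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
}
#endif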
/* |
* Probably taken from the CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c. |
*/ |
static const struct drm_display_mode edid_cea_modes[] = { |
/* 1 - 640x480@60Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, |
752, 800, 0, 480, 490, 492, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 2 - 720x480@60Hz */ |
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, |
798, 858, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 3 - 720x480@60Hz */ |
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736, |
798, 858, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 4 - 1280x720@60Hz */ |
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390, |
1430, 1650, 0, 720, 725, 730, 750, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 5 - 1920x1080i@60Hz */ |
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, |
2052, 2200, 0, 1080, 1084, 1094, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 6 - 1440x480i@60Hz */ |
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, |
1602, 1716, 0, 480, 488, 494, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 7 - 1440x480i@60Hz */ |
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, |
1602, 1716, 0, 480, 488, 494, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 8 - 1440x240@60Hz */ |
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, |
1602, 1716, 0, 240, 244, 247, 262, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_DBLCLK) }, |
/* 9 - 1440x240@60Hz */ |
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478, |
1602, 1716, 0, 240, 244, 247, 262, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_DBLCLK) }, |
/* 10 - 2880x480i@60Hz */ |
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, |
3204, 3432, 0, 480, 488, 494, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 11 - 2880x480i@60Hz */ |
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, |
3204, 3432, 0, 480, 488, 494, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 12 - 2880x240@60Hz */ |
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, |
3204, 3432, 0, 240, 244, 247, 262, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 13 - 2880x240@60Hz */ |
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956, |
3204, 3432, 0, 240, 244, 247, 262, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 14 - 1440x480@60Hz */ |
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, |
1596, 1716, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 15 - 1440x480@60Hz */ |
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472, |
1596, 1716, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 16 - 1920x1080@60Hz */ |
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, |
2052, 2200, 0, 1080, 1084, 1089, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 17 - 720x576@50Hz */ |
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, |
796, 864, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 18 - 720x576@50Hz */ |
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732, |
796, 864, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 19 - 1280x720@50Hz */ |
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720, |
1760, 1980, 0, 720, 725, 730, 750, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 20 - 1920x1080i@50Hz */ |
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, |
2492, 2640, 0, 1080, 1084, 1094, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 21 - 1440x576i@50Hz */ |
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, |
1590, 1728, 0, 576, 580, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 22 - 1440x576i@50Hz */ |
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, |
1590, 1728, 0, 576, 580, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 23 - 1440x288@50Hz */ |
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, |
1590, 1728, 0, 288, 290, 293, 312, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_DBLCLK) }, |
/* 24 - 1440x288@50Hz */ |
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464, |
1590, 1728, 0, 288, 290, 293, 312, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_DBLCLK) }, |
/* 25 - 2880x576i@50Hz */ |
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, |
3180, 3456, 0, 576, 580, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 26 - 2880x576i@50Hz */ |
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, |
3180, 3456, 0, 576, 580, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 27 - 2880x288@50Hz */ |
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, |
3180, 3456, 0, 288, 290, 293, 312, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 28 - 2880x288@50Hz */ |
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928, |
3180, 3456, 0, 288, 290, 293, 312, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 29 - 1440x576@50Hz */ |
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, |
1592, 1728, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 30 - 1440x576@50Hz */ |
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464, |
1592, 1728, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 31 - 1920x1080@50Hz */ |
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, |
2492, 2640, 0, 1080, 1084, 1089, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 32 - 1920x1080@24Hz */ |
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558, |
2602, 2750, 0, 1080, 1084, 1089, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 33 - 1920x1080@25Hz */ |
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448, |
2492, 2640, 0, 1080, 1084, 1089, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 34 - 1920x1080@30Hz */ |
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008, |
2052, 2200, 0, 1080, 1084, 1089, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 35 - 2880x480@60Hz */ |
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, |
3192, 3432, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 36 - 2880x480@60Hz */ |
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944, |
3192, 3432, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 37 - 2880x576@50Hz */ |
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, |
3184, 3456, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 38 - 2880x576@50Hz */ |
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928, |
3184, 3456, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 39 - 1920x1080i@50Hz */ |
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952, |
2120, 2304, 0, 1080, 1126, 1136, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 40 - 1920x1080i@100Hz */ |
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448, |
2492, 2640, 0, 1080, 1084, 1094, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 41 - 1280x720@100Hz */ |
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720, |
1760, 1980, 0, 720, 725, 730, 750, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 42 - 720x576@100Hz */ |
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, |
796, 864, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 43 - 720x576@100Hz */ |
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732, |
796, 864, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 46 - 1920x1080i@120Hz */ |
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008, |
2052, 2200, 0, 1080, 1084, 1094, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 47 - 1280x720@120Hz */ |
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390, |
1430, 1650, 0, 720, 725, 730, 750, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 48 - 720x480@120Hz */ |
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, |
798, 858, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 49 - 720x480@120Hz */ |
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736, |
798, 858, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 50 - 1440x480i@120Hz */ |
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, |
1602, 1716, 0, 480, 488, 494, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 51 - 1440x480i@120Hz */ |
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478, |
1602, 1716, 0, 480, 488, 494, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 52 - 720x576@200Hz */ |
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, |
796, 864, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 53 - 720x576@200Hz */ |
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732, |
796, 864, 0, 576, 581, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 54 - 1440x576i@200Hz */ |
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, |
1590, 1728, 0, 576, 580, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 55 - 1440x576i@200Hz */ |
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464, |
1590, 1728, 0, 576, 580, 586, 625, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 56 - 720x480@240Hz */ |
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, |
798, 858, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 57 - 720x480@240Hz */ |
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736, |
798, 858, 0, 480, 489, 495, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 58 - 1440x480i@240Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, |
1602, 1716, 0, 480, 488, 494, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 59 - 1440x480i@240Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478, |
1602, 1716, 0, 480, 488, 494, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC | |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) }, |
/* 60 - 1280x720@24Hz */ |
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040, |
3080, 3300, 0, 720, 725, 730, 750, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 61 - 1280x720@25Hz */ |
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700, |
3740, 3960, 0, 720, 725, 730, 750, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 62 - 1280x720@30Hz */ |
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040, |
3080, 3300, 0, 720, 725, 730, 750, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 63 - 1920x1080@120Hz */ |
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008, |
2052, 2200, 0, 1080, 1084, 1089, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 64 - 1920x1080@100Hz */ |
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448, |
2492, 2640, 0, 1080, 1084, 1094, 1125, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
}; |
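/*
 * Numbering note for the table above: the CEA Video ID (VIC) is the array
 * index plus one, e.g. edid_cea_modes[3] is VIC 4 (1280x720@60Hz). A
 * hedged sketch of the lookup (drm_match_cea_mode() below implements the
 * same idea):
 */
#if 0
static u8 vic_for_mode(struct drm_display_mode *to_match)
{
	u8 i;

	for (i = 0; i < drm_num_cea_modes; i++)
		if (drm_mode_equal(to_match,
				   (struct drm_display_mode *)&edid_cea_modes[i]))
			return i + 1; /* CEA modes are numbered 1..127 */
	return 0;
}
#endif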
/*** DDC fetch and block validation ***/ |
static const u8 edid_header[] = { |
890,9 → 153,9 |
EXPORT_SYMBOL(drm_edid_header_is_valid); |
static int edid_fixup __read_mostly = 6; |
module_param_named(edid_fixup, edid_fixup, int, 0400); |
MODULE_PARM_DESC(edid_fixup, |
"Minimum number of valid EDID header bytes (0-8, default 6)"); |
//module_param_named(edid_fixup, edid_fixup, int, 0400); |
//MODULE_PARM_DESC(edid_fixup, |
// "Minimum number of valid EDID header bytes (0-8, default 6)"); |
/* |
* Sanity check the EDID block (base or extension). Return 0 if the block |
951,8 → 214,7 |
bad: |
if (raw_edid && print_bad_edid) { |
printk(KERN_ERR "Raw EDID:\n"); |
print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, |
raw_edid, EDID_LENGTH, false); |
// print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); |
} |
return 0; |
} |
1044,9 → 306,12 |
static bool drm_edid_is_zero(u8 *in_edid, int length) |
{ |
if (memchr_inv(in_edid, 0, length)) |
int i; |
u32 *raw_edid = (u32 *)in_edid; |
for (i = 0; i < length / 4; i++) |
if (*(raw_edid + i) != 0) |
return false; |
return true; |
} |
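/*
 * The u32 loop above assumes length is a multiple of 4, which holds for
 * EDID blocks (EDID_LENGTH is 128). A byte-accurate fallback sketch for
 * arbitrary lengths (hypothetical helper):
 */
#if 0
static bool buf_is_zero(const u8 *buf, int length)
{
	int i;

	for (i = 0; i < length; i++)
		if (buf[i])
			return false;
	return true;
}
#endif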
1053,6 → 318,7 |
static u8 * |
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) |
{ |
size_t alloc_size; |
int i, j = 0, valid_extensions = 0; |
u8 *block, *new; |
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); |
1078,9 → 344,16 |
if (block[0x7e] == 0) |
return block; |
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); |
alloc_size = (block[0x7e] + 1) * EDID_LENGTH;
new = kmalloc(alloc_size, GFP_KERNEL); |
if (!new) |
goto out; |
memcpy(new, block, EDID_LENGTH); |
kfree(block); |
block = new; |
for (j = 1; j <= block[0x7e]; j++) { |
1094,22 → 367,20 |
break; |
} |
} |
if (i == 4 && print_bad_edid) { |
if (i == 4) |
dev_warn(connector->dev->dev, |
"%s: Ignoring invalid EDID block %d.\n", |
drm_get_connector_name(connector), j); |
connector->bad_edid_counter++; |
} |
} |
if (valid_extensions != block[0x7e]) { |
block[EDID_LENGTH-1] += block[0x7e] - valid_extensions; |
block[0x7e] = valid_extensions; |
new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); |
new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); |
if (!new) |
goto out; |
memcpy(new, block, alloc_size); |
kfree(block); |
block = new; |
} |
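/*
 * Both reallocations above use the same kmalloc/memcpy/kfree sequence that
 * stands in for krealloc() in this port. A hedged local equivalent
 * (hypothetical helper, assuming only kmalloc/kfree/memcpy are available):
 */
#if 0
static void *krealloc_sketch(void *old, size_t old_size, size_t new_size)
{
	void *new = kmalloc(new_size, GFP_KERNEL);

	if (!new)
		return NULL;
	memcpy(new, old, old_size < new_size ? old_size : new_size);
	kfree(old);
	return new;
}
#endif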
1282,7 → 553,7 |
{ |
int i; |
for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) { |
for (i = 0; i < drm_num_dmt_modes; i++) { |
const struct drm_display_mode *ptr = &drm_dmt_modes[i]; |
if (hsize != ptr->hdisplay) |
continue; |
1634,7 → 905,7 |
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo; |
unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo; |
unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo; |
unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf); |
/* ignore tiny modes */ |
1715,7 → 986,6 |
} |
mode->type = DRM_MODE_TYPE_DRIVER; |
mode->vrefresh = drm_mode_vrefresh(mode); |
drm_mode_set_name(mode); |
return mode; |
1824,7 → 1094,7 |
struct drm_display_mode *newmode; |
struct drm_device *dev = connector->dev; |
for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) { |
for (i = 0; i < drm_num_dmt_modes; i++) { |
if (mode_in_range(drm_dmt_modes + i, edid, timing) && |
valid_inferred_mode(connector, drm_dmt_modes + i)) { |
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); |
1859,7 → 1129,7 |
struct drm_display_mode *newmode; |
struct drm_device *dev = connector->dev; |
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) { |
for (i = 0; i < num_extra_modes; i++) { |
const struct minimode *m = &extra_modes[i]; |
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0); |
if (!newmode) |
1888,7 → 1158,7 |
struct drm_device *dev = connector->dev; |
bool rb = drm_monitor_supports_rb(edid); |
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) { |
for (i = 0; i < num_extra_modes; i++) { |
const struct minimode *m = &extra_modes[i]; |
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0); |
if (!newmode) |
2225,11 → 1495,9 |
#define VIDEO_BLOCK 0x02 |
#define VENDOR_BLOCK 0x03 |
#define SPEAKER_BLOCK 0x04 |
#define VIDEO_CAPABILITY_BLOCK 0x07 |
#define EDID_BASIC_AUDIO (1 << 6) |
#define EDID_CEA_YCRCB444 (1 << 5) |
#define EDID_CEA_YCRCB422 (1 << 4) |
#define EDID_CEA_VCDB_QS (1 << 6) |
/** |
* Search EDID for CEA extension block. |
2257,19 → 1525,16 |
} |
EXPORT_SYMBOL(drm_find_cea_extension); |
/** |
* drm_match_cea_mode - look for a CEA mode matching given mode |
* @to_match: display mode |
* |
* Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861 |
* mode. |
/* |
* Looks for a CEA mode matching the given drm_display_mode.
* Returns its CEA Video ID code, or 0 if not found. |
*/ |
u8 drm_match_cea_mode(const struct drm_display_mode *to_match) |
u8 drm_match_cea_mode(struct drm_display_mode *to_match) |
{ |
struct drm_display_mode *cea_mode; |
u8 mode; |
for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) { |
for (mode = 0; mode < drm_num_cea_modes; mode++) { |
cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode]; |
if (drm_mode_equal(to_match, cea_mode)) |
2289,7 → 1554,7 |
for (mode = db; mode < db + len; mode++) { |
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */ |
if (cea_mode < ARRAY_SIZE(edid_cea_modes)) { |
if (cea_mode < drm_num_cea_modes) { |
struct drm_display_mode *newmode; |
newmode = drm_mode_duplicate(dev, |
&edid_cea_modes[cea_mode]); |
2649,37 → 1914,6 |
EXPORT_SYMBOL(drm_detect_monitor_audio); |
/** |
* drm_rgb_quant_range_selectable - is RGB quantization range selectable? |
* |
* Check whether the monitor reports the RGB quantization range selection |
* as supported. The AVI infoframe can then be used to inform the monitor |
* which quantization range (full or limited) is used. |
*/ |
bool drm_rgb_quant_range_selectable(struct edid *edid) |
{ |
u8 *edid_ext; |
int i, start, end; |
edid_ext = drm_find_cea_extension(edid); |
if (!edid_ext) |
return false; |
if (cea_db_offsets(edid_ext, &start, &end)) |
return false; |
for_each_cea_db(edid_ext, i, start, end) { |
if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK && |
cea_db_payload_len(&edid_ext[i]) == 2) { |
DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]); |
return edid_ext[i + 2] & EDID_CEA_VCDB_QS; |
} |
} |
return false; |
} |
EXPORT_SYMBOL(drm_rgb_quant_range_selectable); |
/** |
* drm_add_display_info - pull display info out if present |
* @edid: EDID data |
* @info: display info (attached to connector) |
2798,7 → 2032,6 |
num_modes += add_cvt_modes(connector, edid); |
num_modes += add_standard_modes(connector, edid); |
num_modes += add_established_modes(connector, edid); |
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) |
num_modes += add_inferred_modes(connector, edid); |
num_modes += add_cea_modes(connector, edid); |
2860,33 → 2093,20 |
EXPORT_SYMBOL(drm_add_modes_noedid); |
/** |
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with |
* data from a DRM display mode |
* @frame: HDMI AVI infoframe |
* @mode: DRM display mode |
* drm_mode_cea_vic - return the CEA-861 VIC of a given mode |
* @mode: mode to look up
*
* Returns 0 on success or a negative error code on failure. |
* RETURNS: |
* The VIC number, 0 in case it's not a CEA-861 mode. |
*/ |
int |
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, |
const struct drm_display_mode *mode) |
uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode) |
{ |
int err; |
uint8_t i; |
if (!frame || !mode) |
return -EINVAL; |
for (i = 0; i < drm_num_cea_modes; i++) |
if (drm_mode_equal(mode, &edid_cea_modes[i])) |
return i + 1; |
err = hdmi_avi_infoframe_init(frame); |
if (err < 0) |
return err; |
frame->video_code = drm_match_cea_mode(mode); |
if (!frame->video_code) |
return 0; |
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; |
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE; |
return 0; |
} |
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode); |
EXPORT_SYMBOL(drm_mode_cea_vic); |
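/*
 * Hypothetical usage sketch: a driver preparing an HDMI AVI infoframe can
 * feed the returned VIC into the frame's video code field (the avi_frame
 * and video_code names are assumptions, not part of this port):
 */
#if 0
uint8_t vic = drm_mode_cea_vic(mode);
if (vic)
	avi_frame.video_code = vic; /* 0 would mean: not a CEA-861 mode */
#endif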
/drivers/video/drm/drm_mm.c |
---|
102,6 → 102,20 |
} |
EXPORT_SYMBOL(drm_mm_pre_get); |
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node) |
{ |
return hole_node->start + hole_node->size; |
} |
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node) |
{ |
struct drm_mm_node *next_node = |
list_entry(hole_node->node_list.next, struct drm_mm_node, |
node_list); |
return next_node->start; |
} |
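/*
 * Worked example for the two helpers above: a node occupying
 * [0x1000, 0x2000) whose successor starts at 0x3000 yields
 * drm_mm_hole_node_start() == 0x2000 and drm_mm_hole_node_end() == 0x3000,
 * i.e. the hole is exactly the gap up to the next node.
 */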
static void drm_mm_insert_helper(struct drm_mm_node *hole_node, |
struct drm_mm_node *node, |
unsigned long size, unsigned alignment, |
113,7 → 127,7 |
unsigned long adj_start = hole_start; |
unsigned long adj_end = hole_end; |
BUG_ON(node->allocated); |
BUG_ON(!hole_node->hole_follows || node->allocated); |
if (mm->color_adjust) |
mm->color_adjust(hole_node, color, &adj_start, &adj_end); |
141,57 → 155,12 |
BUG_ON(node->start + node->size > adj_end); |
node->hole_follows = 0; |
if (__drm_mm_hole_node_start(node) < hole_end) { |
if (node->start + node->size < hole_end) { |
list_add(&node->hole_stack, &mm->hole_stack); |
node->hole_follows = 1; |
} |
} |
struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm, |
unsigned long start, |
unsigned long size, |
bool atomic) |
{ |
struct drm_mm_node *hole, *node; |
unsigned long end = start + size; |
unsigned long hole_start; |
unsigned long hole_end; |
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { |
if (hole_start > start || hole_end < end) |
continue; |
node = drm_mm_kmalloc(mm, atomic); |
if (unlikely(node == NULL)) |
return NULL; |
node->start = start; |
node->size = size; |
node->mm = mm; |
node->allocated = 1; |
INIT_LIST_HEAD(&node->hole_stack); |
list_add(&node->node_list, &hole->node_list); |
if (start == hole_start) { |
hole->hole_follows = 0; |
list_del_init(&hole->hole_stack); |
} |
node->hole_follows = 0; |
if (end != hole_end) { |
list_add(&node->hole_stack, &mm->hole_stack); |
node->hole_follows = 1; |
} |
return node; |
} |
WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size); |
return NULL; |
} |
EXPORT_SYMBOL(drm_mm_create_block); |
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node, |
unsigned long size, |
unsigned alignment, |
284,7 → 253,7 |
BUG_ON(node->start + node->size > end); |
node->hole_follows = 0; |
if (__drm_mm_hole_node_start(node) < hole_end) { |
if (node->start + node->size < hole_end) { |
list_add(&node->hole_stack, &mm->hole_stack); |
node->hole_follows = 1; |
} |
358,14 → 327,13 |
list_entry(node->node_list.prev, struct drm_mm_node, node_list); |
if (node->hole_follows) { |
BUG_ON(__drm_mm_hole_node_start(node) == |
__drm_mm_hole_node_end(node)); |
BUG_ON(drm_mm_hole_node_start(node) |
== drm_mm_hole_node_end(node)); |
list_del(&node->hole_stack); |
} else |
BUG_ON(__drm_mm_hole_node_start(node) != |
__drm_mm_hole_node_end(node)); |
BUG_ON(drm_mm_hole_node_start(node) |
!= drm_mm_hole_node_end(node)); |
if (!prev_node->hole_follows) { |
prev_node->hole_follows = 1; |
list_add(&prev_node->hole_stack, &mm->hole_stack); |
422,8 → 390,6 |
{ |
struct drm_mm_node *entry; |
struct drm_mm_node *best; |
unsigned long adj_start; |
unsigned long adj_end; |
unsigned long best_size; |
BUG_ON(mm->scanned_blocks); |
431,7 → 397,10 |
best = NULL; |
best_size = ~0UL; |
drm_mm_for_each_hole(entry, mm, adj_start, adj_end) { |
list_for_each_entry(entry, &mm->hole_stack, hole_stack) { |
unsigned long adj_start = drm_mm_hole_node_start(entry); |
unsigned long adj_end = drm_mm_hole_node_end(entry); |
if (mm->color_adjust) { |
mm->color_adjust(entry, color, &adj_start, &adj_end); |
if (adj_end <= adj_start) |
438,6 → 407,7 |
continue; |
} |
BUG_ON(!entry->hole_follows); |
if (!check_free_hole(adj_start, adj_end, size, alignment)) |
continue; |
464,8 → 434,6 |
{ |
struct drm_mm_node *entry; |
struct drm_mm_node *best; |
unsigned long adj_start; |
unsigned long adj_end; |
unsigned long best_size; |
BUG_ON(mm->scanned_blocks); |
473,12 → 441,14 |
best = NULL; |
best_size = ~0UL; |
drm_mm_for_each_hole(entry, mm, adj_start, adj_end) { |
if (adj_start < start) |
adj_start = start; |
if (adj_end > end) |
adj_end = end; |
list_for_each_entry(entry, &mm->hole_stack, hole_stack) { |
unsigned long adj_start = drm_mm_hole_node_start(entry) < start ? |
start : drm_mm_hole_node_start(entry); |
unsigned long adj_end = drm_mm_hole_node_end(entry) > end ? |
end : drm_mm_hole_node_end(entry); |
BUG_ON(!entry->hole_follows); |
if (mm->color_adjust) { |
mm->color_adjust(entry, color, &adj_start, &adj_end); |
if (adj_end <= adj_start) |
/drivers/video/drm/drm_modes.c |
---|
504,75 → 504,7 |
} |
EXPORT_SYMBOL(drm_gtf_mode); |
#if IS_ENABLED(CONFIG_VIDEOMODE) |
int drm_display_mode_from_videomode(const struct videomode *vm, |
struct drm_display_mode *dmode) |
{ |
dmode->hdisplay = vm->hactive; |
dmode->hsync_start = dmode->hdisplay + vm->hfront_porch; |
dmode->hsync_end = dmode->hsync_start + vm->hsync_len; |
dmode->htotal = dmode->hsync_end + vm->hback_porch; |
dmode->vdisplay = vm->vactive; |
dmode->vsync_start = dmode->vdisplay + vm->vfront_porch; |
dmode->vsync_end = dmode->vsync_start + vm->vsync_len; |
dmode->vtotal = dmode->vsync_end + vm->vback_porch; |
dmode->clock = vm->pixelclock / 1000; |
dmode->flags = 0; |
if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH) |
dmode->flags |= DRM_MODE_FLAG_PHSYNC; |
else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW) |
dmode->flags |= DRM_MODE_FLAG_NHSYNC; |
if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH) |
dmode->flags |= DRM_MODE_FLAG_PVSYNC; |
else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW) |
dmode->flags |= DRM_MODE_FLAG_NVSYNC; |
if (vm->data_flags & DISPLAY_FLAGS_INTERLACED) |
dmode->flags |= DRM_MODE_FLAG_INTERLACE; |
if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN) |
dmode->flags |= DRM_MODE_FLAG_DBLSCAN; |
drm_mode_set_name(dmode); |
return 0; |
} |
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode); |
#endif |
#if IS_ENABLED(CONFIG_OF_VIDEOMODE) |
/** |
* of_get_drm_display_mode - get a drm_display_mode from devicetree |
* @np: device_node with the timing specification |
* @dmode: will be set to the return value |
* @index: index into the list of display timings in devicetree |
* |
* This function is expensive and should only be used, if only one mode is to be |
* read from DT. To get multiple modes start with of_get_display_timings and |
* work with that instead. |
*/ |
int of_get_drm_display_mode(struct device_node *np, |
struct drm_display_mode *dmode, int index) |
{ |
struct videomode vm; |
int ret; |
ret = of_get_videomode(np, &vm, index); |
if (ret) |
return ret; |
drm_display_mode_from_videomode(&vm, dmode); |
pr_debug("%s: got %dx%d display mode from %s\n", |
of_node_full_name(np), vm.hactive, vm.vactive, np->name); |
drm_mode_debug_printmodeline(dmode); |
return 0; |
} |
EXPORT_SYMBOL_GPL(of_get_drm_display_mode); |
#endif |
/** |
* drm_mode_set_name - set the name on a mode |
* @mode: name will be set in this mode |
* |
/drivers/video/drm/drm_irq.c |
---|
111,7 → 111,6 |
/* Valid dotclock? */ |
if (dotclock > 0) { |
int frame_size; |
/* Convert scanline length in pixels and video dot clock to |
* line duration, frame duration and pixel duration in |
* nanoseconds: |
119,10 → 118,7 |
pixeldur_ns = (s64) div64_u64(1000000000, dotclock); |
linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal * |
1000000000), dotclock); |
frame_size = crtc->hwmode.crtc_htotal * |
crtc->hwmode.crtc_vtotal; |
framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000, |
dotclock); |
framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns; |
} else |
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n", |
crtc->base.id); |
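/*
 * Worked example with CEA mode 16 (1920x1080@60Hz from the table above):
 * dotclock = 148500000, crtc_htotal = 2200, crtc_vtotal = 1125, so
 * pixeldur_ns = 1000000000 / 148500000 ~= 6,
 * linedur_ns = 2200 * 1000000000 / 148500000 ~= 14814, and
 * framedur_ns = 1125 * 14814 = 16665750, i.e. a ~60 Hz frame.
 */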
/drivers/video/drm/drm_pci.c |
---|
88,6 → 88,7 |
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask) |
{ |
struct pci_dev *root; |
int pos; |
u32 lnkcap, lnkcap2; |
*mask = 0; |
102,15 → 103,22 |
#if 0 |
root = dev->pdev->bus->self; |
/* we've been informed that VIA and ServerWorks bridges don't make the cut */
if (root->vendor == PCI_VENDOR_ID_VIA || |
root->vendor == PCI_VENDOR_ID_SERVERWORKS) |
pos = pci_pcie_cap(root); |
if (!pos) |
return -EINVAL; |
pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap); |
pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2); |
/* we've been informed that VIA and ServerWorks bridges don't make the cut */
// if (root->vendor == PCI_VENDOR_ID_VIA || |
// root->vendor == PCI_VENDOR_ID_SERVERWORKS) |
// return -EINVAL; |
if (lnkcap2) { /* PCIe r3.0-compliant */ |
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap); |
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2); |
lnkcap &= PCI_EXP_LNKCAP_SLS; |
lnkcap2 &= 0xfe; |
if (lnkcap2) { /* PCIE GEN 3.0 */ |
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB) |
*mask |= DRM_PCIE_SPEED_25; |
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB) |
117,11 → 125,11 |
*mask |= DRM_PCIE_SPEED_50; |
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB) |
*mask |= DRM_PCIE_SPEED_80; |
} else { /* pre-r3.0 */ |
if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB) |
} else { |
if (lnkcap & 1) |
*mask |= DRM_PCIE_SPEED_25; |
if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB) |
*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50); |
if (lnkcap & 2) |
*mask |= DRM_PCIE_SPEED_50; |
} |
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2); |
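/*
 * Worked example for the decoding above (LNKCAP2 SLS bit values per the
 * PCIe spec): lnkcap2 = 0x0e sets the 2.5, 5.0 and 8.0 GT/s bits, so the
 * mask becomes DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50 | DRM_PCIE_SPEED_80;
 * when lnkcap2 reads as zero, the pre-r3.0 branch derives the same
 * information from the low bits of LNKCAP instead.
 */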