Subversion Repositories Kolibri OS

Compare Revisions

Rev 3479 → Rev 3480

/drivers/ddk/core.S
18,6 → 18,9
.global _DestroyEvent
.global _DestroyObject
 
.global _DiskAdd
.global _DiskMediaChanged
 
.global _FreeKernelSpace
.global _FreePage
 
82,6 → 85,9
.def _DestroyEvent; .scl 2; .type 32; .endef
.def _DestroyObject; .scl 2; .type 32; .endef
 
.def _DiskAdd; .scl 2; .type 32; .endef
.def _DiskMediaChanged; .scl 2; .type 32; .endef
 
.def _FreeKernelSpace; .scl 2; .type 32; .endef
.def _FreePage; .scl 2; .type 32; .endef
 
146,6 → 152,9
_DestroyEvent:
_DestroyObject:
 
_DiskAdd:
_DiskMediaChanged:
 
_FreeKernelSpace:
_FreePage:
 
214,6 → 223,9
.ascii " -export:DestroyEvent"
.ascii " -export:DestroyObject"
 
.ascii " -export:DiskAdd" # stdcall
.ascii " -export:DiskMediaChanged" # stdcall
 
.ascii " -export:FreeKernelSpace" # stdcall
.ascii " -export:FreePage" #
 
/drivers/include/drm/drm_crtc.h
817,7 → 817,7
/* output poll support */
bool poll_enabled;
bool poll_running;
// struct delayed_work output_poll_work;
struct delayed_work output_poll_work;
 
/* pointers to standard properties */
struct list_head property_blob_list;
/drivers/include/linux/bug.h
58,6 → 58,8
 
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
 
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) \
BUILD_BUG_ON((n) == 0 || (((n) & ((n) - 1)) != 0))
 
 
#endif
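The new BUILD_BUG_ON_NOT_POWER_OF_2 macro rejects, at compile time, any size that is not a power of two. A minimal usage sketch (the RING_SIZE constant and ring_next helper are hypothetical, not part of this revision):

#define RING_SIZE 64

static unsigned int ring_next(unsigned int idx)
{
        BUILD_BUG_ON_NOT_POWER_OF_2(RING_SIZE); /* fails the build for e.g. 48 */
        return (idx + 1) & (RING_SIZE - 1);     /* masking only works for powers of two */
}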
/drivers/include/linux/compiler-gcc.h
5,6 → 5,9
/*
* Common definitions for all gcc versions go here.
*/
#define GCC_VERSION (__GNUC__ * 10000 \
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
 
 
/* Optimization barrier */
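GCC_VERSION folds major, minor and patchlevel into one comparable integer, which the per-version headers below switch to. A small, self-contained illustration of the encoding (GCC_VERSION_OF is a hypothetical helper used only for this sketch):

#include <stdio.h>

#define GCC_VERSION_OF(maj, min, patch) ((maj) * 10000 + (min) * 100 + (patch))

int main(void)
{
        /* gcc 4.6.3 encodes as 40603, so "GCC_VERSION >= 40600" means gcc 4.6 or newer */
        printf("%d\n", GCC_VERSION_OF(4, 6, 3));
        return 0;
}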
/drivers/include/linux/compiler-gcc3.h
2,22 → 2,22
#error "Please don't include <linux/compiler-gcc3.h> directly, include <linux/compiler.h> instead."
#endif
 
#if __GNUC_MINOR__ < 2
#if GCC_VERSION < 30200
# error Sorry, your compiler is too old - please upgrade it.
#endif
 
#if __GNUC_MINOR__ >= 3
#if GCC_VERSION >= 30300
# define __used __attribute__((__used__))
#else
# define __used __attribute__((__unused__))
#endif
 
#if __GNUC_MINOR__ >= 4
#if GCC_VERSION >= 30400
#define __must_check __attribute__((warn_unused_result))
#endif
 
#ifdef CONFIG_GCOV_KERNEL
# if __GNUC_MINOR__ < 4
# if GCC_VERSION < 30400
# error "GCOV profiling support for gcc versions below 3.4 not included"
# endif /* __GNUC_MINOR__ */
#endif /* CONFIG_GCOV_KERNEL */
/drivers/include/linux/compiler-gcc4.h
4,7 → 4,7
 
/* GCC 4.1.[01] miscompiles __weak */
#ifdef __KERNEL__
# if __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ <= 1
# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
# error Your version of gcc miscompiles the __weak directive
# endif
#endif
13,7 → 13,11
#define __must_check __attribute__((warn_unused_result))
#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
 
#if __GNUC_MINOR__ >= 3
#if GCC_VERSION >= 40100
# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
 
#if GCC_VERSION >= 40300
/* Mark functions as cold. gcc will assume any path leading to a call
to them will be unlikely. This means a lot of manual unlikely()s
are unnecessary now for any paths leading to the usual suspects
29,9 → 33,15
the kernel context */
#define __cold __attribute__((__cold__))
 
#define __linktime_error(message) __attribute__((__error__(message)))
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
 
#if __GNUC_MINOR__ >= 5
#ifndef __CHECKER__
# define __compiletime_warning(message) __attribute__((warning(message)))
# define __compiletime_error(message) __attribute__((error(message)))
#endif /* __CHECKER__ */
#endif /* GCC_VERSION >= 40300 */
 
#if GCC_VERSION >= 40500
/*
* Mark a position in code as unreachable. This can be used to
* suppress control flow warnings after asm blocks that transfer
46,10 → 56,9
/* Mark a function definition as prohibited from being cloned. */
#define __noclone __attribute__((__noclone__))
 
#endif
#endif
#endif /* GCC_VERSION >= 40500 */
 
#if __GNUC_MINOR__ >= 6
#if GCC_VERSION >= 40600
/*
* Tell the optimizer that something else uses this function or variable.
*/
56,20 → 65,13
#define __visible __attribute__((externally_visible))
#endif
 
#if __GNUC_MINOR__ > 0
#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#endif
#if __GNUC_MINOR__ >= 3 && !defined(__CHECKER__)
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif
 
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if __GNUC_MINOR__ >= 4
#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif
#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
/drivers/include/linux/compiler.h
170,6 → 170,11
(typeof(ptr)) (__ptr + (off)); })
#endif
 
/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
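The fallback __UNIQUE_ID added here pastes the prefix together with __LINE__, while the gcc4 header above prefers __COUNTER__. A standalone sketch of the pasting scheme; the LOCAL_* names are illustration-only stand-ins for the __PASTE helpers from compiler.h:

#define LOCAL_PASTE_(a, b) a##b
#define LOCAL_PASTE(a, b)  LOCAL_PASTE_(a, b)
#define LOCAL_UNIQUE_ID(prefix) LOCAL_PASTE(LOCAL_PASTE(__UNIQUE_ID_, prefix), __COUNTER__)

static int LOCAL_UNIQUE_ID(foo);   /* e.g. __UNIQUE_ID_foo0 */
static int LOCAL_UNIQUE_ID(foo);   /* e.g. __UNIQUE_ID_foo1 -- no redefinition */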
 
#endif /* __KERNEL__ */
 
#endif /* __ASSEMBLY__ */
302,10 → 307,36
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
# define __compiletime_error_fallback(condition) \
do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
#else
# define __compiletime_error_fallback(condition) do { } while (0)
#endif
#ifndef __linktime_error
# define __linktime_error(message)
#endif
 
#define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
bool __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
if (__cond) \
prefix ## suffix(); \
__compiletime_error_fallback(__cond); \
} while (0)
 
#define _compiletime_assert(condition, msg, prefix, suffix) \
__compiletime_assert(condition, msg, prefix, suffix)
 
/**
* compiletime_assert - break build and emit msg if condition is false
* @condition: a compile-time constant condition to check
* @msg: a message to emit if condition is false
*
* In tradition of POSIX assert, this macro will break the build if the
* supplied condition is *false*, emitting the supplied error message if the
* compiler has support to do so.
*/
#define compiletime_assert(condition, msg) \
_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)
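As a hedged usage sketch (the structure and its expected size are hypothetical): compiletime_assert lets a driver pin down an ABI-relevant layout and get a readable error message if it drifts, instead of the anonymous negative-array-size failure of the fallback path.

struct disk_request {           /* hypothetical ABI structure */
        u32 sector;
        u32 count;
};

static inline void check_disk_request_abi(void)
{
        compiletime_assert(sizeof(struct disk_request) == 8,
                           "struct disk_request layout changed");
}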
 
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
/drivers/include/linux/kernel.h
87,10 → 87,16
return buf;
}
 
extern int hex_to_bin(char ch);
extern void hex2bin(u8 *dst, const char *src, size_t count);
enum {
DUMP_PREFIX_NONE,
DUMP_PREFIX_ADDRESS,
DUMP_PREFIX_OFFSET
};
 
int hex_to_bin(char ch);
int hex2bin(u8 *dst, const char *src, size_t count);
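A small sketch of the two helpers as now declared, assuming the usual convention that a negative return signals invalid input: hex_to_bin converts one hex digit, hex2bin fills count bytes from 2*count hex characters (the "00ff" string is just an example value).

static int parse_example(void)
{
        u8 buf[2];

        if (hex2bin(buf, "00ff", 2) < 0)        /* "00ff" -> { 0x00, 0xff } */
                return -1;

        return hex_to_bin('a');                 /* 10; a non-hex character yields -1 */
}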
 
 
//int printk(const char *fmt, ...);
 
#define printk(fmt, arg...) dbgprintf(fmt , ##arg)
335,9 → 341,9
#define dev_info(dev, format, arg...) \
printk("Info %s " format , __func__, ## arg)
 
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
//#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
#define BUILD_BUG_ON(condition)
 
 
struct page
{
unsigned int addr;
/drivers/include/linux/wait.h
172,6 → 172,13
struct work_struct work;
};
 
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
{
return container_of(work, struct delayed_work, work);
}
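to_delayed_work is the usual container_of step from the embedded work_struct back to its delayed_work; a driver callback then typically takes one more container_of hop to its own object. A hedged sketch (my_device and my_poll are hypothetical):

struct my_device {
        struct delayed_work poll_work;
        int state;
};

static void my_poll(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct my_device *mdev = container_of(dwork, struct my_device, poll_work);

        mdev->state++;          /* the callback now has its owning object */
}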
 
 
 
struct workqueue_struct *alloc_workqueue_key(const char *fmt,
unsigned int flags, int max_active);
 
182,6 → 189,13
int queue_delayed_work(struct workqueue_struct *wq,
struct delayed_work *dwork, unsigned long delay);
 
#define INIT_WORK(_work, _func) \
do { \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = _func; \
} while (0)
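The INIT_WORK macro added here only links the list head and stores the callback; a minimal, hypothetical initialization looks like this:

static struct work_struct hotplug_work;

static void hotplug_fn(struct work_struct *work)
{
        /* react to the event */
}

static void init_hotplug(void)
{
        INIT_WORK(&hotplug_work, hotplug_fn);
}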
 
 
#define INIT_DELAYED_WORK(_work, _func) \
do { \
INIT_LIST_HEAD(&(_work)->work.entry); \
207,5 → 221,7
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
 
 
 
 
#endif
 
/drivers/video/drm/drm_crtc.c
37,6 → 37,54
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
 
/**
* drm_modeset_lock_all - take all modeset locks
* @dev: drm device
*
* This function takes all modeset locks, suitable where a more fine-grained
* scheme isn't (yet) implemented.
*/
void drm_modeset_lock_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
 
mutex_lock(&dev->mode_config.mutex);
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_lock_all);
 
/**
* drm_modeset_unlock_all - drop all modeset locks
* @dev: device
*/
void drm_modeset_unlock_all(struct drm_device *dev)
{
struct drm_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
mutex_unlock(&crtc->mutex);
 
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_modeset_unlock_all);
 
/**
* drm_warn_on_modeset_not_all_locked - check that all modeset locks are locked
* @dev: device
*/
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
{
struct drm_crtc *crtc;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
WARN_ON(!mutex_is_locked(&crtc->mutex));
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
}
EXPORT_SYMBOL(drm_warn_on_modeset_not_all_locked);
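The intended calling pattern for the three helpers above, as a hedged sketch (the reset function is hypothetical driver code):

static void my_driver_reset(struct drm_device *dev)
{
        drm_modeset_lock_all(dev);

        /* walk crtc_list / connector_list while every per-crtc lock is held */
        drm_warn_on_modeset_not_all_locked(dev);

        drm_modeset_unlock_all(dev);
}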
 
/* Avoid boilerplate. I'm tired of typing. */
#define DRM_ENUM_NAME_FN(fnname, list) \
char *fnname(int val) \
203,13 → 251,11
}
 
/**
* drm_mode_object_get - allocate a new identifier
* drm_mode_object_get - allocate a new modeset identifier
* @dev: DRM device
* @ptr: object pointer, used to generate unique ID
* @type: object type
* @obj: object pointer, used to generate unique ID
* @obj_type: object type
*
* LOCKING:
*
* Create a unique identifier based on @ptr in @dev's identifier space. Used
* for tracking modes, CRTCs and connectors.
*
220,36 → 266,28
static int drm_mode_object_get(struct drm_device *dev,
struct drm_mode_object *obj, uint32_t obj_type)
{
int new_id = 0;
int ret;
 
again:
if (idr_pre_get(&dev->mode_config.crtc_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Ran out memory getting a mode number\n");
return -ENOMEM;
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_alloc(&dev->mode_config.crtc_idr, obj, 1, 0, GFP_KERNEL);
if (ret >= 0) {
/*
* Set up the object linking under the protection of the idr
* lock so that other users can't see inconsistent state.
*/
obj->id = ret;
obj->type = obj_type;
}
 
mutex_lock(&dev->mode_config.idr_mutex);
ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id);
mutex_unlock(&dev->mode_config.idr_mutex);
if (ret == -EAGAIN)
goto again;
else if (ret)
return ret;
 
obj->id = new_id;
obj->type = obj_type;
return 0;
return ret < 0 ? ret : 0;
}
 
/**
* drm_mode_object_put - free an identifier
* drm_mode_object_put - free a modeset identifier
* @dev: DRM device
* @id: ID to free
* @object: object to free
*
* LOCKING:
* Caller must hold DRM mode_config lock.
*
* Free @id from @dev's unique identifier pool.
*/
static void drm_mode_object_put(struct drm_device *dev,
260,11 → 298,24
mutex_unlock(&dev->mode_config.idr_mutex);
}
 
/**
* drm_mode_object_find - look up a drm object with static lifetime
* @dev: drm device
* @id: id of the mode object
* @type: type of the mode object
*
* Note that framebuffers cannot be looked up with this function - since those
* are reference counted, they need special treatment.
*/
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type)
{
struct drm_mode_object *obj = NULL;
 
/* Framebuffers are reference counted and need their own lookup
* function.*/
WARN_ON(type == DRM_MODE_OBJECT_FB);
 
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != type) || (obj->id != id))
278,13 → 329,18
/**
* drm_framebuffer_init - initialize a framebuffer
* @dev: DRM device
* @fb: framebuffer to be initialized
* @funcs: ... with these functions
*
* LOCKING:
* Caller must hold mode config lock.
*
* Allocates an ID for the framebuffer's parent mode object, sets its mode
* functions & device file and adds it to the master fd list.
*
* IMPORTANT:
* This function publishes the fb and makes it available for concurrent access
* by other users. Which means by this point the fb _must_ be fully set up -
* since all the fb attributes are invariant over its lifetime, no further
* locking but only correct reference counting is required.
*
* RETURNS:
* Zero on success, error code on failure.
*/
293,49 → 349,242
{
int ret;
 
mutex_lock(&dev->mode_config.fb_lock);
kref_init(&fb->refcount);
INIT_LIST_HEAD(&fb->filp_head);
fb->dev = dev;
fb->funcs = funcs;
 
ret = drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB);
if (ret)
return ret;
goto out;
 
fb->dev = dev;
fb->funcs = funcs;
/* Grab the idr reference. */
drm_framebuffer_reference(fb);
 
dev->mode_config.num_fb++;
list_add(&fb->head, &dev->mode_config.fb_list);
out:
mutex_unlock(&dev->mode_config.fb_lock);
 
return 0;
}
EXPORT_SYMBOL(drm_framebuffer_init);
 
static void drm_framebuffer_free(struct kref *kref)
{
struct drm_framebuffer *fb =
container_of(kref, struct drm_framebuffer, refcount);
fb->funcs->destroy(fb);
}
 
static struct drm_framebuffer *__drm_framebuffer_lookup(struct drm_device *dev,
uint32_t id)
{
struct drm_mode_object *obj = NULL;
struct drm_framebuffer *fb;
 
mutex_lock(&dev->mode_config.idr_mutex);
obj = idr_find(&dev->mode_config.crtc_idr, id);
if (!obj || (obj->type != DRM_MODE_OBJECT_FB) || (obj->id != id))
fb = NULL;
else
fb = obj_to_fb(obj);
mutex_unlock(&dev->mode_config.idr_mutex);
 
return fb;
}
 
/**
* drm_framebuffer_lookup - look up a drm framebuffer and grab a reference
* @dev: drm device
* @id: id of the fb object
*
* If successful, this grabs an additional reference to the framebuffer -
* callers need to make sure to eventually unreference the returned framebuffer
* again.
*/
struct drm_framebuffer *drm_framebuffer_lookup(struct drm_device *dev,
uint32_t id)
{
struct drm_framebuffer *fb;
 
mutex_lock(&dev->mode_config.fb_lock);
fb = __drm_framebuffer_lookup(dev, id);
if (fb)
kref_get(&fb->refcount);
mutex_unlock(&dev->mode_config.fb_lock);
 
return fb;
}
EXPORT_SYMBOL(drm_framebuffer_lookup);
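Since the lookup now takes a reference, every caller owes a matching unreference once it is done with the fb. A hypothetical caller:

static int fb_dimensions(struct drm_device *dev, uint32_t id,
                         unsigned int *w, unsigned int *h)
{
        struct drm_framebuffer *fb = drm_framebuffer_lookup(dev, id);

        if (!fb)
                return -ENOENT;

        *w = fb->width;
        *h = fb->height;

        drm_framebuffer_unreference(fb);        /* drop the reference the lookup took */
        return 0;
}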
 
/**
* drm_framebuffer_unreference - unref a framebuffer
* @fb: framebuffer to unref
*
* This function decrements the fb's refcount and frees it if it drops to zero.
*/
void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
kref_put(&fb->refcount, drm_framebuffer_free);
}
EXPORT_SYMBOL(drm_framebuffer_unreference);
 
/**
* drm_framebuffer_reference - incr the fb refcnt
* @fb: framebuffer
*/
void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
kref_get(&fb->refcount);
}
EXPORT_SYMBOL(drm_framebuffer_reference);
 
static void drm_framebuffer_free_bug(struct kref *kref)
{
BUG();
}
 
static void __drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
DRM_DEBUG("FB ID: %d\n", fb->base.id);
kref_put(&fb->refcount, drm_framebuffer_free_bug);
}
 
/* dev->mode_config.fb_lock must be held! */
static void __drm_framebuffer_unregister(struct drm_device *dev,
struct drm_framebuffer *fb)
{
mutex_lock(&dev->mode_config.idr_mutex);
idr_remove(&dev->mode_config.crtc_idr, fb->base.id);
mutex_unlock(&dev->mode_config.idr_mutex);
 
fb->base.id = 0;
 
__drm_framebuffer_unreference(fb);
}
 
/**
* drm_framebuffer_unregister_private - unregister a private fb from the lookup idr
* @fb: fb to unregister
*
* Drivers need to call this when cleaning up driver-private framebuffers, e.g.
* those used for fbdev. Note that the caller must hold a reference of its own,
* i.e. the object may not be destroyed through this call (since it'll lead to a
* locking inversion).
*/
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
 
mutex_lock(&dev->mode_config.fb_lock);
/* Mark fb as reaped and drop idr ref. */
__drm_framebuffer_unregister(dev, fb);
mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_unregister_private);
 
/**
* drm_framebuffer_cleanup - remove a framebuffer object
* @fb: framebuffer to remove
*
* LOCKING:
* Caller must hold mode config lock.
* Cleanup references to a user-created framebuffer. This function is intended
* to be used from the drivers ->destroy callback.
*
* Scans all the CRTCs in @dev's mode_config. If they're using @fb, removes
* it, setting it to NULL.
* Note that this function does not remove the fb from active usage - if it is
* still used anywhere, hilarity can ensue since userspace could call getfb on
* the id and get back -EINVAL. Obviously no concern at driver unload time.
*
* Also, the framebuffer will not be removed from the lookup idr - for
* user-created framebuffers this will happen in the rmfb ioctl. For
* driver-private objects (e.g. for fbdev) drivers need to explicitly call
* drm_framebuffer_unregister_private.
*/
void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
/*
* This could be moved to drm_framebuffer_remove(), but for
* debugging it is nice to keep around the list of fb's that are
* no longer associated w/ a drm_file but are not unreferenced
* yet. (i915 and omapdrm have debugfs files which will show
* this.)
*/
drm_mode_object_put(dev, &fb->base);
 
mutex_lock(&dev->mode_config.fb_lock);
list_del(&fb->head);
dev->mode_config.num_fb--;
mutex_unlock(&dev->mode_config.fb_lock);
}
EXPORT_SYMBOL(drm_framebuffer_cleanup);
 
/**
* drm_framebuffer_remove - remove and unreference a framebuffer object
* @fb: framebuffer to remove
*
* Scans all the CRTCs and planes in @dev's mode_config. If they're
* using @fb, removes it, setting it to NULL. Then drops the reference to the
* passed-in framebuffer. Might take the modeset locks.
*
* Note that this function optimizes the cleanup away if the caller holds the
* last reference to the framebuffer. It is also guaranteed to not take the
* modeset locks in this case.
*/
void drm_framebuffer_remove(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
struct drm_plane *plane;
struct drm_mode_set set;
int ret;
 
WARN_ON(!list_empty(&fb->filp_head));
 
/*
* drm ABI mandates that we remove any deleted framebuffers from active
* usage. But since most sane clients only remove framebuffers they no
* longer need, try to optimize this away.
*
* Since we're holding a reference ourselves, observing a refcount of 1
* means that we're the last holder and can skip it. Also, the refcount
* can never increase from 1 again, so we don't need any barriers or
* locks.
*
* Note that userspace could try to race with us and instate a new
* usage _after_ we've cleared all current ones. End result will be an
* in-use fb with fb-id == 0. Userspace is allowed to shoot its own foot
* in this manner.
*/
if (atomic_read(&fb->refcount.refcount) > 1) {
drm_modeset_lock_all(dev);
/* remove from any CRTC */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb == fb) {
/* should turn off the crtc */
memset(&set, 0, sizeof(struct drm_mode_set));
set.crtc = crtc;
set.fb = NULL;
ret = drm_mode_set_config_internal(&set);
if (ret)
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
}
}
 
list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
if (plane->fb == fb) {
/* should turn off the crtc */
ret = plane->funcs->disable_plane(plane);
if (ret)
DRM_ERROR("failed to disable plane with busy fb\n");
/* disconnect the plane from the fb and crtc: */
__drm_framebuffer_unreference(plane->fb);
plane->fb = NULL;
plane->crtc = NULL;
}
}
drm_modeset_unlock_all(dev);
}
 
drm_framebuffer_unreference(fb);
}
EXPORT_SYMBOL(drm_framebuffer_remove);
 
/**
* drm_crtc_init - Initialise a new CRTC object
* @dev: DRM device
342,9 → 591,6
* @crtc: CRTC object to init
* @funcs: callbacks for the new CRTC
*
* LOCKING:
* Takes mode_config lock.
*
* Inits a new object created as base part of a driver crtc object.
*
* RETURNS:
359,7 → 605,9
crtc->funcs = funcs;
crtc->invert_dimensions = false;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
mutex_init(&crtc->mutex);
mutex_lock_nest_lock(&crtc->mutex, &dev->mode_config.mutex);
 
ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
if (ret)
371,7 → 619,7
dev->mode_config.num_crtc++;
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
 
return ret;
}
381,9 → 629,6
* drm_crtc_cleanup - Cleans up the core crtc usage.
* @crtc: CRTC to cleanup
*
* LOCKING:
* Caller must hold mode config lock.
*
* Cleanup @crtc. Removes from drm modesetting space
* does NOT free object, caller does that.
*/
405,9 → 650,6
* @connector: connector the new mode
* @mode: mode data
*
* LOCKING:
* Caller must hold mode config lock.
*
* Add @mode to @connector's mode list for later use.
*/
void drm_mode_probed_add(struct drm_connector *connector,
422,9 → 664,6
* @connector: connector list to modify
* @mode: mode to remove
*
* LOCKING:
* Caller must hold mode config lock.
*
* Remove @mode from @connector's mode list, then free it.
*/
void drm_mode_remove(struct drm_connector *connector,
440,11 → 679,8
* @dev: DRM device
* @connector: the connector to init
* @funcs: callbacks for this connector
* @name: user visible name of the connector
* @connector_type: user visible type of the connector
*
* LOCKING:
* Takes mode config lock.
*
* Initialises a preallocated connector. Connectors should be
* subclassed as part of driver connector objects.
*
458,7 → 694,7
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
ret = drm_mode_object_get(dev, &connector->base, DRM_MODE_OBJECT_CONNECTOR);
if (ret)
488,7 → 724,7
dev->mode_config.dpms_property, 0);
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
 
return ret;
}
498,9 → 734,6
* drm_connector_cleanup - cleans up an initialised connector
* @connector: connector to cleanup
*
* LOCKING:
* Takes mode config lock.
*
* Cleans up the connector but doesn't free the object.
*/
void drm_connector_cleanup(struct drm_connector *connector)
517,11 → 750,9
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
 
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);
dev->mode_config.num_connector--;
mutex_unlock(&dev->mode_config.mutex);
}
EXPORT_SYMBOL(drm_connector_cleanup);
 
543,7 → 774,7
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
ret = drm_mode_object_get(dev, &encoder->base, DRM_MODE_OBJECT_ENCODER);
if (ret)
557,7 → 788,7
dev->mode_config.num_encoder++;
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
 
return ret;
}
566,11 → 797,11
void drm_encoder_cleanup(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
drm_mode_object_put(dev, &encoder->base);
list_del(&encoder->head);
dev->mode_config.num_encoder--;
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_encoder_cleanup);
 
582,7 → 813,7
{
int ret;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
if (ret)
616,7 → 847,7
}
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
 
return ret;
}
626,7 → 857,7
{
struct drm_device *dev = plane->dev;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
kfree(plane->format_types);
drm_mode_object_put(dev, &plane->base);
/* if not added to a list, it must be a private plane */
634,7 → 865,7
list_del(&plane->head);
dev->mode_config.num_plane--;
}
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
}
EXPORT_SYMBOL(drm_plane_cleanup);
 
642,9 → 873,6
* drm_mode_create - create a new display mode
* @dev: DRM device
*
* LOCKING:
* Caller must hold DRM mode_config lock.
*
* Create a new drm_display_mode, give it an ID, and return it.
*
* RETURNS:
672,9 → 900,6
* @dev: DRM device
* @mode: mode to remove
*
* LOCKING:
* Caller must hold mode config lock.
*
* Free @mode's unique identifier, then free it.
*/
void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode)
899,16 → 1124,19
* drm_mode_config_init - initialize DRM mode_configuration structure
* @dev: DRM device
*
* LOCKING:
* None, should happen single threaded at init time.
*
* Initialize @dev's mode_config structure, used for tracking the graphics
* configuration of @dev.
*
* Since this initializes the modeset locks, no locking is possible. Which is no
* problem, since this should happen single threaded at init time. It is the
* driver's problem to ensure this guarantee.
*
*/
void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
mutex_init(&dev->mode_config.idr_mutex);
mutex_init(&dev->mode_config.fb_lock);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
INIT_LIST_HEAD(&dev->mode_config.crtc_list);
INIT_LIST_HEAD(&dev->mode_config.connector_list);
918,9 → 1146,9
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
drm_mode_create_standard_connector_properties(dev);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
 
/* Just to be sure */
dev->mode_config.num_fb = 0;
978,12 → 1206,13
* drm_mode_config_cleanup - free up DRM mode_config info
* @dev: DRM device
*
* LOCKING:
* Caller must hold mode config lock.
*
* Free up all the connectors and CRTCs associated with this DRM device, then
* free up the framebuffers and associated buffer objects.
*
* Note that since this /should/ happen single-threaded at driver/device
* teardown time, no locking is required. It's the driver's job to ensure that
* this guarantee actually holds true.
*
* FIXME: cleanup any dangling user buffer objects too
*/
void drm_mode_config_cleanup(struct drm_device *dev)
1010,6 → 1239,19
drm_property_destroy(dev, property);
}
 
/*
* Single-threaded teardown context, so it's not required to grab the
* fb_lock to protect against concurrent fb_list access. Contrary, it
* would actually deadlock with the drm_framebuffer_cleanup function.
*
* Also, if there are any framebuffers left, that's a driver leak now,
* so politely WARN about this.
*/
WARN_ON(!list_empty(&dev->mode_config.fb_list));
list_for_each_entry_safe(fb, fbt, &dev->mode_config.fb_list, head) {
drm_framebuffer_remove(fb);
}
 
list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
head) {
plane->funcs->destroy(plane);
1019,7 → 1261,6
crtc->funcs->destroy(crtc);
}
 
idr_remove_all(&dev->mode_config.crtc_idr);
idr_destroy(&dev->mode_config.crtc_idr);
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
1029,9 → 1270,6
* @out: drm_mode_modeinfo struct to return to the user
* @in: drm_display_mode to use
*
* LOCKING:
* None.
*
* Convert a drm_display_mode into a drm_mode_modeinfo structure to return to
* the user.
*/
1068,9 → 1306,6
* @out: drm_display_mode to return to the user
* @in: drm_mode_modeinfo to use
*
* LOCKING:
* None.
*
* Convert a drm_mode_modeinfo into a drm_display_mode structure to return to
* the caller.
*
1107,14 → 1342,10
#if 0
/**
* drm_mode_getresources - get graphics configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* LOCKING:
* Takes mode config lock.
*
* Construct a set of configuration description structures and return
* them to the user, including CRTC, connector and framebuffer configuration.
*
1147,8 → 1378,8
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
mutex_lock(&file_priv->fbs_lock);
/*
* For the non-control nodes we need to limit the list of resources
* by IDs in the group list for this node
1156,6 → 1387,23
list_for_each(lh, &file_priv->fbs)
fb_count++;
 
/* handle this in 4 parts */
/* FBs */
if (card_res->count_fbs >= fb_count) {
copied = 0;
fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
list_for_each_entry(fb, &file_priv->fbs, filp_head) {
if (put_user(fb->base.id, fb_id + copied)) {
mutex_unlock(&file_priv->fbs_lock);
return -EFAULT;
}
copied++;
}
}
card_res->count_fbs = fb_count;
mutex_unlock(&file_priv->fbs_lock);
 
drm_modeset_lock_all(dev);
mode_group = &file_priv->master->minor->mode_group;
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) {
 
1179,21 → 1427,6
card_res->max_width = dev->mode_config.max_width;
card_res->min_width = dev->mode_config.min_width;
 
/* handle this in 4 parts */
/* FBs */
if (card_res->count_fbs >= fb_count) {
copied = 0;
fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
list_for_each_entry(fb, &file_priv->fbs, filp_head) {
if (put_user(fb->base.id, fb_id + copied)) {
ret = -EFAULT;
goto out;
}
copied++;
}
}
card_res->count_fbs = fb_count;
 
/* CRTCs */
if (card_res->count_crtcs >= crtc_count) {
copied = 0;
1289,20 → 1522,16
card_res->count_connectors, card_res->count_encoders);
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
/**
* drm_mode_getcrtc - get CRTC configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* LOCKING:
* Takes mode config lock.
*
* Construct a CRTC configuration structure to return to the user.
*
* Called by the user via ioctl.
1321,7 → 1550,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
DRM_MODE_OBJECT_CRTC);
1349,20 → 1578,16
}
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
/**
* drm_mode_getconnector - get connector configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* LOCKING:
* Takes mode config lock.
*
* Construct a connector configuration structure to return to the user.
*
* Called by the user via ioctl.
1494,6 → 1719,7
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
 
1508,7 → 1734,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, enc_resp->encoder_id,
DRM_MODE_OBJECT_ENCODER);
if (!obj) {
1527,7 → 1753,7
enc_resp->possible_clones = encoder->possible_clones;
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
1537,9 → 1763,6
* @data: ioctl data
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Return a plane count and set of IDs.
*/
int drm_mode_getplane_res(struct drm_device *dev, void *data,
1554,7 → 1777,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
config = &dev->mode_config;
 
/*
1576,7 → 1799,7
plane_resp->count_planes = config->num_plane;
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
1586,9 → 1809,6
* @data: ioctl data
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Return plane info, including formats supported, gamma size, any
* current fb, etc.
*/
1604,7 → 1824,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, plane_resp->plane_id,
DRM_MODE_OBJECT_PLANE);
if (!obj) {
1644,7 → 1864,7
plane_resp->count_format_types = plane->format_count;
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
1652,11 → 1872,8
* drm_mode_setplane - set up or tear down a plane
* @dev: DRM device
* @data: ioctl data*
* @file_prive: DRM file info
* @file_priv: DRM file info
*
* LOCKING:
* Takes mode config lock.
*
* Set plane info, including placement, fb, scaling, and other factors.
* Or pass a NULL fb to disable.
*/
1667,7 → 1884,7
struct drm_mode_object *obj;
struct drm_plane *plane;
struct drm_crtc *crtc;
struct drm_framebuffer *fb;
struct drm_framebuffer *fb = NULL, *old_fb = NULL;
int ret = 0;
unsigned int fb_width, fb_height;
int i;
1675,8 → 1892,6
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
/*
* First, find the plane, crtc, and fb objects. If not available,
* we don't bother to call the driver.
1686,16 → 1901,18
if (!obj) {
DRM_DEBUG_KMS("Unknown plane ID %d\n",
plane_req->plane_id);
ret = -ENOENT;
goto out;
return -ENOENT;
}
plane = obj_to_plane(obj);
 
/* No fb means shut it down */
if (!plane_req->fb_id) {
drm_modeset_lock_all(dev);
old_fb = plane->fb;
plane->funcs->disable_plane(plane);
plane->crtc = NULL;
plane->fb = NULL;
drm_modeset_unlock_all(dev);
goto out;
}
 
1709,15 → 1926,13
}
crtc = obj_to_crtc(obj);
 
obj = drm_mode_object_find(dev, plane_req->fb_id,
DRM_MODE_OBJECT_FB);
if (!obj) {
fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
plane_req->fb_id);
ret = -ENOENT;
goto out;
}
fb = obj_to_fb(obj);
 
/* Check whether this plane supports the fb pixel format. */
for (i = 0; i < plane->format_count; i++)
1763,6 → 1978,7
goto out;
}
 
drm_modeset_lock_all(dev);
ret = plane->funcs->update_plane(plane, crtc, fb,
plane_req->crtc_x, plane_req->crtc_y,
plane_req->crtc_w, plane_req->crtc_h,
1769,26 → 1985,58
plane_req->src_x, plane_req->src_y,
plane_req->src_w, plane_req->src_h);
if (!ret) {
old_fb = plane->fb;
plane->crtc = crtc;
plane->fb = fb;
fb = NULL;
}
drm_modeset_unlock_all(dev);
 
out:
mutex_unlock(&dev->mode_config.mutex);
if (fb)
drm_framebuffer_unreference(fb);
if (old_fb)
drm_framebuffer_unreference(old_fb);
 
return ret;
}
#endif
 
/**
* drm_mode_set_config_internal - helper to call ->set_config
* @set: modeset config to set
*
* This is a little helper to wrap internal calls to the ->set_config driver
* interface. The only thing it adds is the correct refcounting dance.
*/
int drm_mode_set_config_internal(struct drm_mode_set *set)
{
struct drm_crtc *crtc = set->crtc;
struct drm_framebuffer *fb, *old_fb;
int ret;
 
old_fb = crtc->fb;
fb = set->fb;
 
ret = crtc->funcs->set_config(set);
if (ret == 0) {
if (old_fb)
drm_framebuffer_unreference(old_fb);
if (fb)
drm_framebuffer_reference(fb);
}
 
return ret;
}
EXPORT_SYMBOL(drm_mode_set_config_internal);
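The same refcount-aware helper is what the framebuffer-removal path above uses to shut a crtc down; a hedged, stripped-down example of that call shape:

static int disable_crtc(struct drm_crtc *crtc)
{
        struct drm_mode_set set;

        memset(&set, 0, sizeof(set));
        set.crtc = crtc;
        set.fb = NULL;                          /* NULL fb requests turning the crtc off */

        return drm_mode_set_config_internal(&set);
}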
 
#if 0
/**
* drm_mode_setcrtc - set CRTC configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* LOCKING:
* Takes mode config lock.
*
* Build a new CRTC configuration based on user request.
*
* Called by the user via ioctl.
1818,7 → 2066,7
if (crtc_req->x > INT_MAX || crtc_req->y > INT_MAX)
return -ERANGE;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_req->crtc_id,
DRM_MODE_OBJECT_CRTC);
if (!obj) {
1840,16 → 2088,16
goto out;
}
fb = crtc->fb;
/* Make refcounting symmetric with the lookup path. */
drm_framebuffer_reference(fb);
} else {
obj = drm_mode_object_find(dev, crtc_req->fb_id,
DRM_MODE_OBJECT_FB);
if (!obj) {
fb = drm_framebuffer_lookup(dev, crtc_req->fb_id);
if (!fb) {
DRM_DEBUG_KMS("Unknown FB ID%d\n",
crtc_req->fb_id);
ret = -EINVAL;
goto out;
}
fb = obj_to_fb(obj);
}
 
mode = drm_mode_create(dev);
1946,12 → 2194,15
set.connectors = connector_set;
set.num_connectors = crtc_req->count_connectors;
set.fb = fb;
ret = crtc->funcs->set_config(&set);
ret = drm_mode_set_config_internal(&set);
 
out:
if (fb)
drm_framebuffer_unreference(fb);
 
kfree(connector_set);
drm_mode_destroy(dev, mode);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
1969,15 → 2220,14
if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id);
ret = -EINVAL;
goto out;
return -EINVAL;
}
crtc = obj_to_crtc(obj);
 
mutex_lock(&crtc->mutex);
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
ret = -ENXIO;
1997,7 → 2247,8
}
}
out:
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&crtc->mutex);
 
return ret;
}
#endif
2008,7 → 2259,7
 
switch (bpp) {
case 8:
fmt = DRM_FORMAT_RGB332;
fmt = DRM_FORMAT_C8;
break;
case 16:
if (depth == 15)
2039,14 → 2290,10
#if 0
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* LOCKING:
* Takes mode config lock.
*
* Add a new FB to the specified CRTC, given a user request.
*
* Called by the user via ioctl.
2080,24 → 2327,18
if ((config->min_height > r.height) || (r.height > config->max_height))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
 
/* TODO check buffer is sufficiently large */
/* TODO setup destructor callback */
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
return PTR_ERR(fb);
}
 
mutex_lock(&file_priv->fbs_lock);
or->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_unlock(&file_priv->fbs_lock);
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
 
2223,14 → 2464,10
 
/**
* drm_mode_addfb2 - add an FB to the graphics configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* LOCKING:
* Takes mode config lock.
*
* Add a new FB to the specified CRTC, given a user request with format.
*
* Called by the user via ioctl.
2269,34 → 2506,28
if (ret)
return ret;
 
mutex_lock(&dev->mode_config.mutex);
 
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
ret = PTR_ERR(fb);
goto out;
return PTR_ERR(fb);
}
 
mutex_lock(&file_priv->fbs_lock);
r->fb_id = fb->base.id;
list_add(&fb->filp_head, &file_priv->fbs);
DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
mutex_unlock(&file_priv->fbs_lock);
 
out:
mutex_unlock(&dev->mode_config.mutex);
 
return ret;
}
 
/**
* drm_mode_rmfb - remove an FB from the configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* LOCKING:
* Takes mode config lock.
*
* Remove the FB specified by the user.
*
* Called by the user via ioctl.
2307,51 → 2538,50
int drm_mode_rmfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_mode_object *obj;
struct drm_framebuffer *fb = NULL;
struct drm_framebuffer *fbl = NULL;
uint32_t *id = data;
int ret = 0;
int found = 0;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we really get a framebuffer back. */
if (!obj) {
ret = -EINVAL;
goto out;
}
fb = obj_to_fb(obj);
mutex_lock(&file_priv->fbs_lock);
mutex_lock(&dev->mode_config.fb_lock);
fb = __drm_framebuffer_lookup(dev, *id);
if (!fb)
goto fail_lookup;
 
list_for_each_entry(fbl, &file_priv->fbs, filp_head)
if (fb == fbl)
found = 1;
if (!found)
goto fail_lookup;
 
if (!found) {
ret = -EINVAL;
goto out;
}
/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
__drm_framebuffer_unregister(dev, fb);
 
list_del_init(&fb->filp_head);
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);
 
drm_framebuffer_remove(fb);
 
out:
mutex_unlock(&dev->mode_config.mutex);
return ret;
return 0;
 
fail_lookup:
mutex_unlock(&dev->mode_config.fb_lock);
mutex_unlock(&file_priv->fbs_lock);
 
return -EINVAL;
}
 
/**
* drm_mode_getfb - get FB info
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* LOCKING:
* Takes mode config lock.
*
* Lookup the FB given its ID and return info about it.
*
* Called by the user via ioctl.
2363,20 → 2593,15
void *data, struct drm_file *file_priv)
{
struct drm_mode_fb_cmd *r = data;
struct drm_mode_object *obj;
struct drm_framebuffer *fb;
int ret = 0;
int ret;
 
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
ret = -EINVAL;
goto out;
}
fb = obj_to_fb(obj);
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
return -EINVAL;
 
r->height = fb->height;
r->width = fb->width;
2383,10 → 2608,13
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
r->pitch = fb->pitches[0];
fb->funcs->create_handle(fb, file_priv, &r->handle);
if (fb->funcs->create_handle)
ret = fb->funcs->create_handle(fb, file_priv, &r->handle);
else
ret = -ENODEV;
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_framebuffer_unreference(fb);
 
return ret;
}
 
2396,7 → 2624,6
struct drm_clip_rect __user *clips_ptr;
struct drm_clip_rect *clips = NULL;
struct drm_mode_fb_dirty_cmd *r = data;
struct drm_mode_object *obj;
struct drm_framebuffer *fb;
unsigned flags;
int num_clips;
2405,13 → 2632,9
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
ret = -EINVAL;
goto out_err1;
}
fb = obj_to_fb(obj);
fb = drm_framebuffer_lookup(dev, r->fb_id);
if (!fb)
return -EINVAL;
 
num_clips = r->num_clips;
clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
2449,17 → 2672,19
}
 
if (fb->funcs->dirty) {
drm_modeset_lock_all(dev);
ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
clips, num_clips);
drm_modeset_unlock_all(dev);
} else {
ret = -ENOSYS;
goto out_err2;
}
 
out_err2:
kfree(clips);
out_err1:
mutex_unlock(&dev->mode_config.mutex);
drm_framebuffer_unreference(fb);
 
return ret;
}
 
2466,11 → 2691,8
 
/**
* drm_fb_release - remove and free the FBs on this file
* @filp: file * from the ioctl
* @priv: drm file for the ioctl
*
* LOCKING:
* Takes mode config lock.
*
* Destroy all the FBs associated with @filp.
*
* Called by the user via ioctl.
2483,11 → 2705,20
struct drm_device *dev = priv->minor->dev;
struct drm_framebuffer *fb, *tfb;
 
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&priv->fbs_lock);
list_for_each_entry_safe(fb, tfb, &priv->fbs, filp_head) {
 
mutex_lock(&dev->mode_config.fb_lock);
/* Mark fb as reaped, we still have a ref from fpriv->fbs. */
__drm_framebuffer_unregister(dev, fb);
mutex_unlock(&dev->mode_config.fb_lock);
 
list_del_init(&fb->filp_head);
 
/* This will also drop the fpriv->fbs reference. */
drm_framebuffer_remove(fb);
}
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&priv->fbs_lock);
}
#endif
 
2582,10 → 2813,9
 
/**
* drm_fb_attachmode - Attach a user mode to a connector
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* This attaches a user-specified mode to a connector.
* Called by the user via ioctl.
2606,7 → 2836,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
2630,7 → 2860,7
 
drm_mode_attachmode(dev, connector, mode);
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
2637,10 → 2867,9
 
/**
* drm_fb_detachmode - Detach a user-specified mode from a connector
* @inode: inode from the ioctl
* @filp: file * from the ioctl
* @cmd: cmd from ioctl
* @arg: arg from ioctl
* @dev: drm device for the ioctl
* @data: data pointer for the ioctl
* @file_priv: drm file for the ioctl call
*
* Called by the user via ioctl.
*
2660,7 → 2889,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
if (!obj) {
2677,7 → 2906,7
 
ret = drm_mode_detachmode(dev, connector, &mode);
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
#endif
2925,7 → 3154,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
if (!obj) {
ret = -EINVAL;
3003,7 → 3232,7
out_resp->count_enum_blobs = blob_count;
}
done:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
#endif
3056,7 → 3285,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
if (!obj) {
ret = -EINVAL;
3074,7 → 3303,7
out_resp->length = blob->length;
 
done:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
#endif
3177,7 → 3406,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!obj) {
3214,7 → 3443,7
}
arg->count_props = props_count;
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
3231,7 → 3460,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
arg_obj = drm_mode_object_find(dev, arg->obj_id, arg->obj_type);
if (!arg_obj)
3269,7 → 3498,7
}
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
#endif
3333,7 → 3562,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
3374,7 → 3603,7
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
 
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
 
}
3392,7 → 3621,7
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
ret = -EINVAL;
3425,7 → 3654,7
goto out;
}
out:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
3462,6 → 3691,7
int *bpp)
{
switch (format) {
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB332:
case DRM_FORMAT_BGR233:
*depth = 8;
/drivers/video/drm/drm_crtc_helper.c
69,6 → 69,7
EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head);
 
static bool drm_kms_helper_poll = true;
module_param_named(poll, drm_kms_helper_poll, bool, 0600);
 
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
141,6 → 142,12
// dbgprintf("status %x\n", connector->status);
}
 
/* Re-enable polling in case the global poll config changed. */
if (drm_kms_helper_poll != dev->mode_config.poll_running)
drm_kms_helper_poll_enable(dev);
 
dev->mode_config.poll_running = drm_kms_helper_poll;
 
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
connector->base.id, drm_get_connector_name(connector));
955,7 → 962,13
}
EXPORT_SYMBOL(drm_helper_resume_force_mode);
 
#if 0
void drm_kms_helper_hotplug_event(struct drm_device *dev)
{
/* send a uevent + call fbdev */
if (dev->mode_config.funcs->output_poll_changed)
dev->mode_config.funcs->output_poll_changed(dev);
}
EXPORT_SYMBOL(drm_kms_helper_hotplug_event);
 
#define DRM_OUTPUT_POLL_PERIOD (10*HZ)
static void output_poll_execute(struct work_struct *work)
1004,8 → 1017,8
if (changed)
drm_kms_helper_hotplug_event(dev);
 
if (repoll)
schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
// if (repoll)
// schedule_delayed_work(delayed_work, DRM_OUTPUT_POLL_PERIOD);
}
 
void drm_kms_helper_poll_disable(struct drm_device *dev)
1012,7 → 1025,7
{
if (!dev->mode_config.poll_enabled)
return;
cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
// cancel_delayed_work_sync(&dev->mode_config.output_poll_work);
}
EXPORT_SYMBOL(drm_kms_helper_poll_disable);
 
1030,8 → 1043,8
poll = true;
}
 
if (poll)
schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
// if (poll)
// schedule_delayed_work(&dev->mode_config.output_poll_work, DRM_OUTPUT_POLL_PERIOD);
}
EXPORT_SYMBOL(drm_kms_helper_poll_enable);
 
1083,5 → 1096,3
drm_kms_helper_hotplug_event(dev);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
 
#endif
/drivers/video/drm/drm_edid.c
29,11 → 29,11
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "drm_edid_modes.h"
 
#define version_greater(edid, maj, min) \
(((edid)->version > (maj)) || \
87,9 → 87,6
int product_id;
u32 quirks;
} edid_quirk_list[] = {
/* ASUS VW222S */
{ "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING },
 
/* Acer AL1706 */
{ "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
/* Acer F51 */
130,6 → 127,746
{ "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
};
 
/*
* Autogenerated from the DMT spec.
* This table is copied from xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode drm_dmt_modes[] = {
/* 640x350@85Hz */
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 350, 382, 385, 445, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x400@85Hz */
{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672,
736, 832, 0, 400, 401, 404, 445, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 720x400@85Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756,
828, 936, 0, 400, 401, 404, 446, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 489, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 492, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 640x480@85Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696,
752, 832, 0, 480, 481, 484, 509, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 800x600@56Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@72Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@85Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832,
896, 1048, 0, 600, 601, 604, 631, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 800x600@120Hz RB */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 73250, 800, 848,
880, 960, 0, 600, 603, 607, 636, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 848x480@60Hz */
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864,
976, 1088, 0, 480, 486, 494, 517, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@43Hz, interlace */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032,
1208, 1264, 0, 768, 768, 772, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 1024x768@60Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@85Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072,
1168, 1376, 0, 768, 769, 772, 808, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1024x768@120Hz RB */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 115500, 1024, 1072,
1104, 1184, 0, 768, 771, 775, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1152x864@75Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@60Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 68250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 790, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@60Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
1472, 1664, 0, 768, 771, 778, 798, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@75Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360,
1488, 1696, 0, 768, 771, 778, 805, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x768@85Hz */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360,
1496, 1712, 0, 768, 771, 778, 809, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x768@120Hz RB */
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 140250, 1280, 1328,
1360, 1440, 0, 768, 771, 778, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 71000, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 823, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@60Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
1480, 1680, 0, 800, 803, 809, 831, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x800@75Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360,
1488, 1696, 0, 800, 803, 809, 838, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@85Hz */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360,
1496, 1712, 0, 800, 803, 809, 843, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x800@120Hz RB */
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 146250, 1280, 1328,
1360, 1440, 0, 800, 803, 809, 847, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x960@60Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
1488, 1800, 0, 960, 961, 964, 1000, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@85Hz */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344,
1504, 1728, 0, 960, 961, 964, 1011, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x960@120Hz RB */
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 175500, 1280, 1328,
1360, 1440, 0, 960, 963, 967, 1017, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1280x1024@60Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@75Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@85Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344,
1504, 1728, 0, 1024, 1025, 1028, 1072, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1280x1024@120Hz RB */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 187250, 1280, 1328,
1360, 1440, 0, 1024, 1027, 1034, 1084, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1360x768@60Hz */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
1536, 1792, 0, 768, 771, 777, 795, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1360x768@120Hz RB */
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 148250, 1360, 1408,
1440, 1520, 0, 768, 771, 776, 813, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 101000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1400x1050@60Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@75Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504,
1648, 1896, 0, 1050, 1053, 1057, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504,
1656, 1912, 0, 1050, 1053, 1057, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1400x1050@120Hz RB */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 208000, 1400, 1448,
1480, 1560, 0, 1050, 1053, 1057, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 88750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 926, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1440x900@60Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
1672, 1904, 0, 900, 903, 909, 934, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@75Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536,
1688, 1936, 0, 900, 903, 909, 942, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@85Hz */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544,
1696, 1952, 0, 900, 903, 909, 948, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1440x900@120Hz RB */
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 182750, 1440, 1488,
1520, 1600, 0, 900, 903, 909, 953, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1600x1200@60Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@65Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@70Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@75Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 202500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@85Hz */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664,
1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1600x1200@120Hz RB */
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 268250, 1600, 1648,
1680, 1760, 0, 1200, 1203, 1207, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 119000, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1080, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1680x1050@60Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@75Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800,
1976, 2272, 0, 1050, 1053, 1059, 1099, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@85Hz */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808,
1984, 2288, 0, 1050, 1053, 1059, 1105, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1680x1050@120Hz RB */
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 245500, 1680, 1728,
1760, 1840, 0, 1050, 1053, 1059, 1112, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1792x1344@60Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888,
2104, 2456, 0, 1344, 1345, 1348, 1417, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1792x1344@120Hz RB */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 333250, 1792, 1840,
1872, 1952, 0, 1344, 1347, 1351, 1423, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@75Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984,
2208, 2560, 0, 1392, 1395, 1399, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1856x1392@120Hz RB */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 356500, 1856, 1904,
1936, 2016, 0, 1392, 1395, 1399, 1474, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 154000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1235, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1200@60Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@75Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056,
2264, 2608, 0, 1200, 1203, 1209, 1255, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@85Hz */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064,
2272, 2624, 0, 1200, 1203, 1209, 1262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1200@120Hz RB */
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 317000, 1920, 1968,
2000, 2080, 0, 1200, 1203, 1209, 1271, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 1920x1440@60Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@75Hz */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064,
2288, 2640, 0, 1440, 1441, 1444, 1500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 1920x1440@120Hz RB */
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 380500, 1920, 1968,
2000, 2080, 0, 1440, 1443, 1447, 1525, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 268500, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1646, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2560x1600@60Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@75Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1672, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@85Hz */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768,
3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 2560x1600@120Hz RB */
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 552750, 2560, 2608,
2640, 2720, 0, 1600, 1603, 1609, 1694, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
};
 
static const struct drm_display_mode edid_est_modes[] = {
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
968, 1056, 0, 600, 601, 605, 628, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@60Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824,
896, 1024, 0, 600, 601, 603, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@56Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656,
720, 840, 0, 480, 481, 484, 500, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@75Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664,
704, 832, 0, 480, 489, 491, 520, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@72Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 30240, 640, 704,
768, 864, 0, 480, 483, 486, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@67Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25200, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 640x480@60Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 738,
846, 900, 0, 400, 421, 423, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 720x400@88Hz */
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 28320, 720, 738,
846, 900, 0, 400, 412, 414, 449, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 720x400@70Hz */
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296,
1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1280x1024@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78800, 1024, 1040,
1136, 1312, 0, 768, 769, 772, 800, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1024x768@75Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048,
1184, 1328, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@70Hz */
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
1184, 1344, 0, 768, 771, 777, 806, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 1024x768@60Hz */
{ DRM_MODE("1024x768i", DRM_MODE_TYPE_DRIVER,44900, 1024, 1032,
1208, 1264, 0, 768, 768, 776, 817, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_INTERLACE) }, /* 1024x768@43Hz */
{ DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 57284, 832, 864,
928, 1152, 0, 624, 625, 628, 667, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, /* 832x624@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816,
896, 1056, 0, 600, 601, 604, 625, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@75Hz */
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856,
976, 1040, 0, 600, 637, 643, 666, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 800x600@72Hz */
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
1344, 1600, 0, 864, 865, 868, 900, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, /* 1152x864@75Hz */
};
 
struct minimode {
short w;
short h;
short r;
short rb;
};
 
static const struct minimode est3_modes[] = {
/* byte 6 */
{ 640, 350, 85, 0 },
{ 640, 400, 85, 0 },
{ 720, 400, 85, 0 },
{ 640, 480, 85, 0 },
{ 848, 480, 60, 0 },
{ 800, 600, 85, 0 },
{ 1024, 768, 85, 0 },
{ 1152, 864, 75, 0 },
/* byte 7 */
{ 1280, 768, 60, 1 },
{ 1280, 768, 60, 0 },
{ 1280, 768, 75, 0 },
{ 1280, 768, 85, 0 },
{ 1280, 960, 60, 0 },
{ 1280, 960, 85, 0 },
{ 1280, 1024, 60, 0 },
{ 1280, 1024, 85, 0 },
/* byte 8 */
{ 1360, 768, 60, 0 },
{ 1440, 900, 60, 1 },
{ 1440, 900, 60, 0 },
{ 1440, 900, 75, 0 },
{ 1440, 900, 85, 0 },
{ 1400, 1050, 60, 1 },
{ 1400, 1050, 60, 0 },
{ 1400, 1050, 75, 0 },
/* byte 9 */
{ 1400, 1050, 85, 0 },
{ 1680, 1050, 60, 1 },
{ 1680, 1050, 60, 0 },
{ 1680, 1050, 75, 0 },
{ 1680, 1050, 85, 0 },
{ 1600, 1200, 60, 0 },
{ 1600, 1200, 65, 0 },
{ 1600, 1200, 70, 0 },
/* byte 10 */
{ 1600, 1200, 75, 0 },
{ 1600, 1200, 85, 0 },
{ 1792, 1344, 60, 0 },
{ 1792, 1344, 85, 0 },
{ 1856, 1392, 60, 0 },
{ 1856, 1392, 75, 0 },
{ 1920, 1200, 60, 1 },
{ 1920, 1200, 60, 0 },
/* byte 11 */
{ 1920, 1200, 75, 0 },
{ 1920, 1200, 85, 0 },
{ 1920, 1440, 60, 0 },
{ 1920, 1440, 75, 0 },
};
 
static const struct minimode extra_modes[] = {
{ 1024, 576, 60, 0 },
{ 1366, 768, 60, 0 },
{ 1600, 900, 60, 0 },
{ 1680, 945, 60, 0 },
{ 1920, 1080, 60, 0 },
{ 2048, 1152, 60, 0 },
{ 2048, 1536, 60, 0 },
};
 
/*
* Probably taken from CEA-861 spec.
* This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
*/
static const struct drm_display_mode edid_cea_modes[] = {
/* 1 - 640x480@60Hz */
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
752, 800, 0, 480, 490, 492, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 2 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 3 - 720x480@60Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 4 - 1280x720@60Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 5 - 1920x1080i@60Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 6 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 7 - 1440x480i@60Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 8 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 9 - 1440x240@60Hz */
{ DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
1602, 1716, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 10 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 11 - 2880x480i@60Hz */
{ DRM_MODE("2880x480i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 12 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 13 - 2880x240@60Hz */
{ DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
3204, 3432, 0, 240, 244, 247, 262, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 14 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 15 - 1440x480@60Hz */
{ DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
1596, 1716, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 16 - 1920x1080@60Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 17 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 18 - 720x576@50Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 19 - 1280x720@50Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 20 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 21 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 22 - 1440x576i@50Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 23 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 24 - 1440x288@50Hz */
{ DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
1590, 1728, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 25 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 26 - 2880x576i@50Hz */
{ DRM_MODE("2880x576i", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 27 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 28 - 2880x288@50Hz */
{ DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
3180, 3456, 0, 288, 290, 293, 312, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 29 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 30 - 1440x576@50Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1592, 1728, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 31 - 1920x1080@50Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 32 - 1920x1080@24Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 33 - 1920x1080@25Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 34 - 1920x1080@30Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 35 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 36 - 2880x480@60Hz */
{ DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
3192, 3432, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 37 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 38 - 2880x576@50Hz */
{ DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
3184, 3456, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 39 - 1920x1080i@50Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 40 - 1920x1080i@100Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 41 - 1280x720@100Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
1760, 1980, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 42 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 43 - 720x576@100Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 44 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 45 - 1440x576i@100Hz */
{ DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_DBLCLK) },
/* 46 - 1920x1080i@120Hz */
{ DRM_MODE("1920x1080i", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
DRM_MODE_FLAG_INTERLACE) },
/* 47 - 1280x720@120Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
1430, 1650, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 48 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 49 - 720x480@120Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 50 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 51 - 1440x480i@120Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 52 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 53 - 720x576@200Hz */
{ DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
796, 864, 0, 576, 581, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 54 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 55 - 1440x576i@200Hz */
{ DRM_MODE("1440x576i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
1590, 1728, 0, 576, 580, 586, 625, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 56 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 57 - 720x480@240Hz */
{ DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
798, 858, 0, 480, 489, 495, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
/* 58 - 1440x480i@240Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 59 - 1440x480i@240Hz */
{ DRM_MODE("1440x480i", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
1602, 1716, 0, 480, 488, 494, 525, 0,
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLCLK) },
/* 60 - 1280x720@24Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 61 - 1280x720@25Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
3740, 3960, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 62 - 1280x720@30Hz */
{ DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
3080, 3300, 0, 720, 725, 730, 750, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 63 - 1920x1080@120Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
/* 64 - 1920x1080@100Hz */
{ DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
};
 
/*** DDC fetch and block validation ***/
 
static const u8 edid_header[] = {
153,9 → 890,9
EXPORT_SYMBOL(drm_edid_header_is_valid);
 
static int edid_fixup __read_mostly = 6;
//module_param_named(edid_fixup, edid_fixup, int, 0400);
//MODULE_PARM_DESC(edid_fixup,
// "Minimum number of valid EDID header bytes (0-8, default 6)");
module_param_named(edid_fixup, edid_fixup, int, 0400);
MODULE_PARM_DESC(edid_fixup,
"Minimum number of valid EDID header bytes (0-8, default 6)");
 
/*
* Sanity check the EDID block (base or extension). Return 0 if the block
214,7 → 951,8
bad:
if (raw_edid && print_bad_edid) {
printk(KERN_ERR "Raw EDID:\n");
// print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
raw_edid, EDID_LENGTH, false);
}
return 0;
}
306,12 → 1044,9
 
static bool drm_edid_is_zero(u8 *in_edid, int length)
{
int i;
u32 *raw_edid = (u32 *)in_edid;
if (memchr_inv(in_edid, 0, length))
return false;
 
for (i = 0; i < length / 4; i++)
if (*(raw_edid + i) != 0)
return false;
return true;
}
 
318,7 → 1053,6
static u8 *
drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
size_t alloc_size;
int i, j = 0, valid_extensions = 0;
u8 *block, *new;
bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS);
344,16 → 1078,9
if (block[0x7e] == 0)
return block;
 
alloc_size = (block[0x7e] + 1) * EDID_LENGTH ;
 
new = kmalloc(alloc_size, GFP_KERNEL);
 
new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
 
memcpy(new, block, EDID_LENGTH);
kfree(block);
 
block = new;
 
for (j = 1; j <= block[0x7e]; j++) {
367,20 → 1094,22
break;
}
}
if (i == 4)
 
if (i == 4 && print_bad_edid) {
dev_warn(connector->dev->dev,
"%s: Ignoring invalid EDID block %d.\n",
drm_get_connector_name(connector), j);
 
connector->bad_edid_counter++;
}
}
 
if (valid_extensions != block[0x7e]) {
block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
block[0x7e] = valid_extensions;
new = kmalloc((valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
if (!new)
goto out;
memcpy(new, block, alloc_size);
kfree(block);
block = new;
}
 
553,7 → 1282,7
{
int i;
 
for (i = 0; i < drm_num_dmt_modes; i++) {
for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
const struct drm_display_mode *ptr = &drm_dmt_modes[i];
if (hsize != ptr->hdisplay)
continue;
905,7 → 1634,7
unsigned vblank = (pt->vactive_vblank_hi & 0xf) << 8 | pt->vblank_lo;
unsigned hsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc0) << 2 | pt->hsync_offset_lo;
unsigned hsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x30) << 4 | pt->hsync_pulse_width_lo;
unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) >> 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_offset = (pt->hsync_vsync_offset_pulse_width_hi & 0xc) << 2 | pt->vsync_offset_pulse_width_lo >> 4;
unsigned vsync_pulse_width = (pt->hsync_vsync_offset_pulse_width_hi & 0x3) << 4 | (pt->vsync_offset_pulse_width_lo & 0xf);
 
/* ignore tiny modes */
986,6 → 1715,7
}
 
mode->type = DRM_MODE_TYPE_DRIVER;
mode->vrefresh = drm_mode_vrefresh(mode);
drm_mode_set_name(mode);
 
return mode;
1094,7 → 1824,7
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
 
for (i = 0; i < drm_num_dmt_modes; i++) {
for (i = 0; i < ARRAY_SIZE(drm_dmt_modes); i++) {
if (mode_in_range(drm_dmt_modes + i, edid, timing) &&
valid_inferred_mode(connector, drm_dmt_modes + i)) {
newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
1129,7 → 1859,7
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
 
for (i = 0; i < num_extra_modes; i++) {
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf_mode(dev, m->w, m->h, m->r, 0, 0);
if (!newmode)
1158,7 → 1888,7
struct drm_device *dev = connector->dev;
bool rb = drm_monitor_supports_rb(edid);
 
for (i = 0; i < num_extra_modes; i++) {
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_cvt_mode(dev, m->w, m->h, m->r, rb, 0, 0);
if (!newmode)
1495,9 → 2225,11
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define VIDEO_CAPABILITY_BLOCK 0x07
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
#define EDID_CEA_VCDB_QS (1 << 6)
 
/**
* Search EDID for CEA extension block.
1525,16 → 2257,19
}
EXPORT_SYMBOL(drm_find_cea_extension);
 
/*
* Looks for a CEA mode matching given drm_display_mode.
* Returns its CEA Video ID code, or 0 if not found.
/**
* drm_match_cea_mode - look for a CEA mode matching given mode
* @to_match: display mode
*
* Returns the CEA Video ID (VIC) of the mode or 0 if it isn't a CEA-861
* mode.
*/
u8 drm_match_cea_mode(struct drm_display_mode *to_match)
u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
{
struct drm_display_mode *cea_mode;
u8 mode;
 
for (mode = 0; mode < drm_num_cea_modes; mode++) {
for (mode = 0; mode < ARRAY_SIZE(edid_cea_modes); mode++) {
cea_mode = (struct drm_display_mode *)&edid_cea_modes[mode];
 
if (drm_mode_equal(to_match, cea_mode))
1554,7 → 2289,7
 
for (mode = db; mode < db + len; mode++) {
cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
if (cea_mode < drm_num_cea_modes) {
if (cea_mode < ARRAY_SIZE(edid_cea_modes)) {
struct drm_display_mode *newmode;
newmode = drm_mode_duplicate(dev,
&edid_cea_modes[cea_mode]);
1914,6 → 2649,37
EXPORT_SYMBOL(drm_detect_monitor_audio);
 
/**
* drm_rgb_quant_range_selectable - is RGB quantization range selectable?
*
* Check whether the monitor reports the RGB quantization range selection
* as supported. The AVI infoframe can then be used to inform the monitor
* which quantization range (full or limited) is used.
*/
bool drm_rgb_quant_range_selectable(struct edid *edid)
{
u8 *edid_ext;
int i, start, end;
 
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
return false;
 
if (cea_db_offsets(edid_ext, &start, &end))
return false;
 
for_each_cea_db(edid_ext, i, start, end) {
if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
cea_db_payload_len(&edid_ext[i]) == 2) {
DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
}
}
 
return false;
}
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
 
/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data
* @info: display info (attached to connector)
2032,6 → 2798,7
num_modes += add_cvt_modes(connector, edid);
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
num_modes += add_inferred_modes(connector, edid);
num_modes += add_cea_modes(connector, edid);
 
2093,20 → 2860,33
EXPORT_SYMBOL(drm_add_modes_noedid);
 
/**
* drm_mode_cea_vic - return the CEA-861 VIC of a given mode
* @mode: mode
* drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with
* data from a DRM display mode
* @frame: HDMI AVI infoframe
* @mode: DRM display mode
*
* RETURNS:
* The VIC number, 0 in case it's not a CEA-861 mode.
* Returns 0 on success or a negative error code on failure.
*/
uint8_t drm_mode_cea_vic(const struct drm_display_mode *mode)
int
drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode)
{
uint8_t i;
int err;
 
for (i = 0; i < drm_num_cea_modes; i++)
if (drm_mode_equal(mode, &edid_cea_modes[i]))
return i + 1;
if (!frame || !mode)
return -EINVAL;
 
err = hdmi_avi_infoframe_init(frame);
if (err < 0)
return err;
 
frame->video_code = drm_match_cea_mode(mode);
if (!frame->video_code)
return 0;
 
frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE;
frame->active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
 
return 0;
}
EXPORT_SYMBOL(drm_mode_cea_vic);
EXPORT_SYMBOL(drm_hdmi_avi_infoframe_from_display_mode);
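A minimal usage sketch for the new helper above, as a KMS driver's mode-set path might use it; hdmi_avi_infoframe_pack() and the 17-byte packed size are assumptions based on the generic <linux/hdmi.h> infoframe API, not something introduced by this revision.

static int example_setup_avi_infoframe(const struct drm_display_mode *mode)
{
	struct hdmi_avi_infoframe frame;
	u8 buf[17];	/* header (4) + AVI payload (13); size is an assumption */
	ssize_t len;
	int err;

	/* Initialize the frame and derive the VIC from the mode. */
	err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
	if (err < 0)
		return err;

	/* Pack into wire format before handing it to the encoder hardware. */
	len = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
	if (len < 0)
		return len;

	/* write buf[0..len-1] to the hardware infoframe registers here */
	return 0;
}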
/drivers/video/drm/drm_fb_helper.c
52,9 → 52,36
* mode setting driver. They can be used mostly independently from the crtc
* helper functions used by many drivers to implement the kernel mode setting
* interfaces.
*
* Initialization is done as a three-step process with drm_fb_helper_init(),
* drm_fb_helper_single_add_all_connectors() and drm_fb_helper_initial_config().
* Drivers with fancier requirements than the default behaviour can override the
* second step with their own code. Teardown is done with drm_fb_helper_fini().
*
* At runtime drivers should restore the fbdev console by calling
* drm_fb_helper_restore_fbdev_mode() from their ->lastclose callback. They
* should also notify the fb helper code of updates to the output
* configuration by calling drm_fb_helper_hotplug_event(). For easier
* integration with the output polling code in drm_crtc_helper.c the modeset
* code provides a ->output_poll_changed callback.
*
* All other functions exported by the fb helper library can be used to
* implement the fbdev driver interface by the driver.
*/
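A driver-side sketch of the three-step initialization just described; my_fb_probe and my_driver_fbdev_init are hypothetical driver names, and the crtc/connector limits are placeholders:

/* Illustrative only: the helper calls are the ones documented in this file,
 * the driver-side names and limits are made up. */
static int my_fb_probe(struct drm_fb_helper *helper,
		       struct drm_fb_helper_surface_size *sizes)
{
	/* Driver-specific: allocate the backing framebuffer, set helper->fb
	 * and helper->fbdev, then fill the fbdev info with
	 * drm_fb_helper_fill_fix()/drm_fb_helper_fill_var(). */
	return 0;
}

static const struct drm_fb_helper_funcs my_fb_helper_funcs = {
	.fb_probe = my_fb_probe,
};

int my_driver_fbdev_init(struct drm_device *dev, struct drm_fb_helper *helper)
{
	int ret;

	helper->funcs = &my_fb_helper_funcs;

	/* Step 1: allocate the helper structures for the given limits. */
	ret = drm_fb_helper_init(dev, helper, dev->mode_config.num_crtc, 4);
	if (ret)
		return ret;

	/* Step 2: let the fbdev emulation use every connector. */
	drm_fb_helper_single_add_all_connectors(helper);

	/* Step 3: pick an initial configuration and register the fbdev. */
	drm_fb_helper_initial_config(helper, 32);

	return 0;
}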
 
/* simple single crtc case helper function */
/**
* drm_fb_helper_single_add_all_connectors() - add all connectors to fbdev
* emulation helper
* @fb_helper: fbdev initialized with drm_fb_helper_init
*
* This function adds all the available connectors for use with the given
* fb_helper. This is a separate step to allow drivers to freely assign
* connectors to the fbdev, e.g. if some are reserved for special purposes or
* not adequate to be used for the fbcon.
*
* Since this is part of the initial setup before the fbdev is published, no
* locking is required.
*/
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
110,7 → 137,24
}
 
 
static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
int bound = 0, crtcs_bound = 0;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
crtcs_bound++;
if (crtc->fb == fb_helper->fb)
bound++;
}
 
if (bound < crtcs_bound)
return false;
return true;
}
 
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
120,9 → 164,20
int i, j;
 
/*
* fbdev->blank can be called from irq context in case of a panic.
* Since we already have our own special panic handler which will
* restore the fbdev console mode completely, just bail out early.
*/
 
/*
* For each CRTC in this fb, turn the connectors on/off.
*/
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return;
}
 
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
 
137,9 → 192,14
dev->mode_config.dpms_property, dpms_mode);
}
}
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
}
 
/**
* drm_fb_helper_blank - implementation for ->fb_blank
* @blank: desired blanking state
* @info: fbdev registered by the helper
*/
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
switch (blank) {
183,6 → 243,24
kfree(helper->crtc_info);
}
 
/**
* drm_fb_helper_init - initialize a drm_fb_helper structure
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
* @crtc_count: maximum number of crtcs to support in this fbdev emulation
* @max_conn_count: max connector count
*
* This allocates the structures for the fbdev helper with the given limits.
* Note that this won't yet touch the hardware (through the driver interfaces)
* nor register the fbdev. This is only done in drm_fb_helper_initial_config()
* to allow driver writers more control over the exact init sequence.
*
* Drivers must set fb_helper->funcs before calling
* drm_fb_helper_initial_config().
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int crtc_count, int max_conn_count)
294,6 → 372,11
return 0;
}
 
/**
* drm_fb_helper_setcmap - implementation for ->fb_setcmap
* @cmap: cmap to set
* @info: fbdev registered by the helper
*/
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
333,6 → 416,11
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
 
/**
* drm_fb_helper_check_var - implementation for ->fb_check_var
* @var: screeninfo to check
* @info: fbdev registered by the helper
*/
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
425,13 → 513,19
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
 
/* this will let fbcon do the mode init */
/**
* drm_fb_helper_set_par - implementation for ->fb_set_par
* @info: fbdev registered by the helper
*
* This will let fbcon do the mode init and is called at initialization time by
* the fbdev core when registering the driver, and later on through the hotplug
* callback.
*/
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
struct drm_crtc *crtc;
int ret;
int i;
 
440,25 → 534,29
return -EINVAL;
}
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
ret = drm_mode_set_config_internal(&fb_helper->crtc_info[i].mode_set);
if (ret) {
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
}
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
 
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
// drm_fb_helper_hotplug_event(fb_helper);
drm_fb_helper_hotplug_event(fb_helper);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
 
/**
* drm_fb_helper_pan_display - implementation for ->fb_pan_display
* @var: updated screen information
* @info: fbdev registered by the helper
*/
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
469,7 → 567,12
int ret = 0;
int i;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
}
 
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
 
479,7 → 582,7
modeset->y = var->yoffset;
 
if (modeset->num_connectors) {
ret = crtc->funcs->set_config(modeset);
ret = drm_mode_set_config_internal(modeset);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
486,15 → 589,20
}
}
}
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
 
int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
/*
* Allocates the backing storage and sets up the fbdev info structure through
* the ->fb_probe callback and then registers the fbdev and sets up the panic
* notifier.
*/
static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
int preferred_bpp)
{
int new_fb = 0;
int ret = 0;
int crtc_count = 0;
int i;
struct fb_info *info;
572,34 → 680,44
}
 
/* push down into drivers */
new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (new_fb < 0)
return new_fb;
ret = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
if (ret < 0)
return ret;
 
info = fb_helper->fbdev;
 
/* set the fb pointer */
/*
* Set the fb pointer - usually drm_setup_crtcs does this for hotplug
* events, but at init time drm_setup_crtcs needs to be called before
* the fb is allocated (since we need to figure out the desired size of
* the fb before we can allocate it ...). Hence we need to fix things up
* here again.
*/
for (i = 0; i < fb_helper->crtc_count; i++)
if (fb_helper->crtc_info[i].mode_set.num_connectors)
fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
 
if (new_fb) {
 
info->var.pixclock = 0;
 
// dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
// info->node, info->fix.id);
 
} else {
drm_fb_helper_set_par(info);
}
 
 
if (new_fb)
list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);
 
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
 
/**
* drm_fb_helper_fill_fix - initializes fixed fbdev information
* @info: fbdev registered by the helper
* @pitch: desired pitch
* @depth: desired depth
*
* Helper to fill in the fixed fbdev information useful for non-accelerated
* fbdev emulation. Drivers which support acceleration methods which impose
* additional constraints need to set up their own limits.
*
* Drivers should call this (or their equivalent setup code) from their
* ->fb_probe callback.
*/
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
uint32_t depth)
{
620,6 → 738,20
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
 
/**
* drm_fb_helper_fill_var - initializes variable fbdev information
* @info: fbdev instance to set up
* @fb_helper: fb helper instance to use as template
* @fb_width: desired fb width
* @fb_height: desired fb height
*
* Sets up the variable fbdev metainformation from the given fb helper instance
* and the drm framebuffer allocated in fb_helper->fb.
*
* Drivers should call this (or their equivalent setup code) from their
* ->fb_probe callback after having allocated the fbdev backing
* storage framebuffer.
*/
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height)
{
937,6 → 1069,7
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->num_connectors = 0;
modeset->fb = NULL;
}
 
for (i = 0; i < fb_helper->connector_count; i++) {
953,9 → 1086,21
modeset->mode = drm_mode_duplicate(dev,
fb_crtc->desired_mode);
modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
modeset->fb = fb_helper->fb;
}
}
 
/* Clear out any old modes if there are no more connected outputs. */
for (i = 0; i < fb_helper->crtc_count; i++) {
modeset = &fb_helper->crtc_info[i].mode_set;
if (modeset->num_connectors == 0) {
BUG_ON(modeset->fb);
BUG_ON(modeset->num_connectors);
if (modeset->mode)
drm_mode_destroy(dev, modeset->mode);
modeset->mode = NULL;
}
}
out:
kfree(crtcs);
kfree(modes);
963,18 → 1108,23
}
 
/**
* drm_helper_initial_config - setup a sane initial connector configuration
* drm_fb_helper_initial_config - setup a sane initial connector configuration
* @fb_helper: fb_helper device struct
* @bpp_sel: bpp value to use for the framebuffer configuration
*
* LOCKING:
* Called at init time by the driver to set up the @fb_helper initial
* configuration, must take the mode config lock.
*
* Scans the CRTCs and connectors and tries to put together an initial setup.
* At the moment, this is a cloned configuration across all heads with
* a new framebuffer object as the backing store.
*
* Note that this also registers the fbdev and so allows userspace to call into
* the driver through the fbdev interfaces.
*
* This function will call down into the ->fb_probe callback to let
* the driver allocate and initialize the fbdev info structure and the drm
* framebuffer used to back the fbdev. drm_fb_helper_fill_var() and
* drm_fb_helper_fill_fix() are provided as helpers to setup simple default
* values for the fbdev info structure.
*
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
983,9 → 1133,6
struct drm_device *dev = fb_helper->dev;
int count = 0;
 
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(fb_helper->dev);
 
// drm_fb_helper_parse_command_line(fb_helper);
 
count = drm_fb_helper_probe_connector_modes(fb_helper,
1003,18 → 1150,22
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
 
#if 0
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
* probing all the outputs attached to the fb
* @fb_helper: the drm_fb_helper
*
* LOCKING:
* Called at runtime, must take mode config lock.
*
* Scan the connectors attached to the fb_helper and try to put together a
* setup after notification of a change in output configuration.
*
* Called at runtime, takes the mode config locks to be able to check/change the
* modeset configuration. Must be run from process context (which usually means
* either the output polling work or a work item launched from the driver's
* hotplug interrupt).
*
* Note that the driver must ensure that this is only called _after_ the fb has
* been fully set up, i.e. after the call to drm_fb_helper_initial_config.
*
* RETURNS:
* 0 on success and a non-zero error code otherwise.
*/
1023,23 → 1174,14
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
int bound = 0, crtcs_bound = 0;
struct drm_crtc *crtc;
 
if (!fb_helper->fb)
return 0;
 
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
crtcs_bound++;
if (crtc->fb == fb_helper->fb)
bound++;
}
 
if (bound < crtcs_bound) {
mutex_lock(&fb_helper->dev->mode_config.mutex);
if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&fb_helper->dev->mode_config.mutex);
return 0;
}
DRM_DEBUG_KMS("\n");
1050,13 → 1192,16
 
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
mutex_unlock(&fb_helper->dev->mode_config.mutex);
 
drm_modeset_lock_all(dev);
drm_setup_crtcs(fb_helper);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
drm_fb_helper_set_par(fb_helper->fbdev);
 
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
#endif
 
 
 
/drivers/video/drm/drm_gem.c
217,6 → 217,9
* we may want to use ida for number allocation and a hash table
* for the pointers, anyway.
*/
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);
 
spin_lock(&filp->table_lock);
 
/* Check if we currently have a reference on the object */
257,21 → 260,19
int ret;
 
/*
* Get the user-visible handle using idr.
* Get the user-visible handle using idr. Preload and perform
* allocation under our spinlock.
*/
again:
/* ensure there is space available to allocate a handle */
if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
return -ENOMEM;
idr_preload(GFP_KERNEL);
spin_lock(&file_priv->table_lock);
 
/* do the allocation under our spinlock */
spin_lock(&file_priv->table_lock);
ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
 
spin_unlock(&file_priv->table_lock);
if (ret == -EAGAIN)
goto again;
else if (ret)
idr_preload_end();
if (ret < 0)
return ret;
*handlep = ret;
 
drm_gem_object_handle_reference(obj);
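The preload-then-allocate sequence above replaces the old idr_pre_get()/idr_get_new_above() retry loop; a standalone sketch of the pattern, with my_idr and my_lock as placeholder names:

static DEFINE_SPINLOCK(my_lock);
static DEFINE_IDR(my_idr);

static int my_alloc_id(void *ptr)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate memory outside the spinlock */
	spin_lock(&my_lock);
	/* ids start at 1; end == 0 means "no upper limit" */
	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
	spin_unlock(&my_lock);
	idr_preload_end();

	return id;	/* >= 1 on success, negative errno on failure */
}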
 
384,6 → 385,9
{
struct drm_gem_object *obj;
 
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);
 
spin_lock(&filp->table_lock);
 
/* Check if we currently have a reference on the object */
439,23 → 443,18
if (obj == NULL)
return -ENOENT;
 
again:
if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
ret = -ENOMEM;
goto err;
}
 
idr_preload(GFP_KERNEL);
spin_lock(&dev->object_name_lock);
if (!obj->name) {
ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
&obj->name);
ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
obj->name = ret;
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
idr_preload_end();
 
if (ret == -EAGAIN)
goto again;
else if (ret)
if (ret < 0)
goto err;
ret = 0;
 
/* Allocate a reference for the name table. */
drm_gem_object_reference(obj);
462,6 → 461,7
} else {
args->name = (uint64_t) obj->name;
spin_unlock(&dev->object_name_lock);
idr_preload_end();
ret = 0;
}
 
488,6 → 488,9
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
 
if(handle == -2)
printf("%s handle %d\n", __FUNCTION__, handle);
 
spin_lock(&dev->object_name_lock);
obj = idr_find(&dev->object_name_idr, (int) args->name);
if (obj)
549,8 → 552,6
{
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, file_private);
 
idr_remove_all(&file_private->object_idr);
idr_destroy(&file_private->object_idr);
}
#endif
/drivers/video/drm/drm_irq.c
111,6 → 111,7
 
/* Valid dotclock? */
if (dotclock > 0) {
int frame_size;
/* Convert scanline length in pixels and video dot clock to
* line duration, frame duration and pixel duration in
* nanoseconds:
118,7 → 119,10
pixeldur_ns = (s64) div64_u64(1000000000, dotclock);
linedur_ns = (s64) div64_u64(((u64) crtc->hwmode.crtc_htotal *
1000000000), dotclock);
framedur_ns = (s64) crtc->hwmode.crtc_vtotal * linedur_ns;
frame_size = crtc->hwmode.crtc_htotal *
crtc->hwmode.crtc_vtotal;
framedur_ns = (s64) div64_u64((u64) frame_size * 1000000000,
dotclock);
} else
DRM_ERROR("crtc %d: Can't calculate constants, dotclock = 0!\n",
crtc->base.id);
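As a hedged sanity check of the reworked framedur_ns computation (numbers are illustrative, not from the source): for a 1920x1080@60Hz mode with crtc_htotal = 2200, crtc_vtotal = 1125 and a 148.5 MHz dot clock, pixeldur_ns ≈ 1000000000 / 148500000 ≈ 7 ns, linedur_ns ≈ 2200 * 1000000000 / 148500000 ≈ 14815 ns, and framedur_ns ≈ (2200 * 1125) * 1000000000 / 148500000 ≈ 16666667 ns, i.e. a 60 Hz frame period. Doing the frame calculation as one division over frame_size avoids multiplying the rounding error already present in the truncated linedur_ns.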
/drivers/video/drm/drm_mm.c
102,20 → 102,6
}
EXPORT_SYMBOL(drm_mm_pre_get);
 
static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
{
return hole_node->start + hole_node->size;
}
 
static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
{
struct drm_mm_node *next_node =
list_entry(hole_node->node_list.next, struct drm_mm_node,
node_list);
 
return next_node->start;
}
 
static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
struct drm_mm_node *node,
unsigned long size, unsigned alignment,
127,7 → 113,7
unsigned long adj_start = hole_start;
unsigned long adj_end = hole_end;
 
BUG_ON(!hole_node->hole_follows || node->allocated);
BUG_ON(node->allocated);
 
if (mm->color_adjust)
mm->color_adjust(hole_node, color, &adj_start, &adj_end);
155,12 → 141,57
BUG_ON(node->start + node->size > adj_end);
 
node->hole_follows = 0;
if (node->start + node->size < hole_end) {
if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
}
 
struct drm_mm_node *drm_mm_create_block(struct drm_mm *mm,
unsigned long start,
unsigned long size,
bool atomic)
{
struct drm_mm_node *hole, *node;
unsigned long end = start + size;
unsigned long hole_start;
unsigned long hole_end;
 
drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
if (hole_start > start || hole_end < end)
continue;
 
node = drm_mm_kmalloc(mm, atomic);
if (unlikely(node == NULL))
return NULL;
 
node->start = start;
node->size = size;
node->mm = mm;
node->allocated = 1;
 
INIT_LIST_HEAD(&node->hole_stack);
list_add(&node->node_list, &hole->node_list);
 
if (start == hole_start) {
hole->hole_follows = 0;
list_del_init(&hole->hole_stack);
}
 
node->hole_follows = 0;
if (end != hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
 
return node;
}
 
WARN(1, "no hole found for block 0x%lx + 0x%lx\n", start, size);
return NULL;
}
EXPORT_SYMBOL(drm_mm_create_block);
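A hedged usage sketch for the new drm_mm_create_block(): reserving a fixed range (say, a firmware-reserved region) out of an already-initialized allocator; the start and size values are placeholders:

static int my_reserve_fixed_range(struct drm_mm *mm)
{
	struct drm_mm_node *node;

	/* Claim [0x100000, 0x100000 + 0x10000) if it still falls in a hole. */
	node = drm_mm_create_block(mm, 0x100000, 0x10000, false);
	if (!node)
		return -ENOMEM;	/* no suitable hole, or allocation failure */

	return 0;
}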
 
struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *hole_node,
unsigned long size,
unsigned alignment,
253,7 → 284,7
BUG_ON(node->start + node->size > end);
 
node->hole_follows = 0;
if (node->start + node->size < hole_end) {
if (__drm_mm_hole_node_start(node) < hole_end) {
list_add(&node->hole_stack, &mm->hole_stack);
node->hole_follows = 1;
}
327,13 → 358,14
list_entry(node->node_list.prev, struct drm_mm_node, node_list);
 
if (node->hole_follows) {
BUG_ON(drm_mm_hole_node_start(node)
== drm_mm_hole_node_end(node));
BUG_ON(__drm_mm_hole_node_start(node) ==
__drm_mm_hole_node_end(node));
list_del(&node->hole_stack);
} else
BUG_ON(drm_mm_hole_node_start(node)
!= drm_mm_hole_node_end(node));
BUG_ON(__drm_mm_hole_node_start(node) !=
__drm_mm_hole_node_end(node));
 
 
if (!prev_node->hole_follows) {
prev_node->hole_follows = 1;
list_add(&prev_node->hole_stack, &mm->hole_stack);
390,6 → 422,8
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long adj_start;
unsigned long adj_end;
unsigned long best_size;
 
BUG_ON(mm->scanned_blocks);
397,10 → 431,7
best = NULL;
best_size = ~0UL;
 
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry);
 
drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
407,7 → 438,6
continue;
}
 
BUG_ON(!entry->hole_follows);
if (!check_free_hole(adj_start, adj_end, size, alignment))
continue;
 
434,6 → 464,8
{
struct drm_mm_node *entry;
struct drm_mm_node *best;
unsigned long adj_start;
unsigned long adj_end;
unsigned long best_size;
 
BUG_ON(mm->scanned_blocks);
441,14 → 473,12
best = NULL;
best_size = ~0UL;
 
list_for_each_entry(entry, &mm->hole_stack, hole_stack) {
unsigned long adj_start = drm_mm_hole_node_start(entry) < start ?
start : drm_mm_hole_node_start(entry);
unsigned long adj_end = drm_mm_hole_node_end(entry) > end ?
end : drm_mm_hole_node_end(entry);
drm_mm_for_each_hole(entry, mm, adj_start, adj_end) {
if (adj_start < start)
adj_start = start;
if (adj_end > end)
adj_end = end;
 
BUG_ON(!entry->hole_follows);
 
if (mm->color_adjust) {
mm->color_adjust(entry, color, &adj_start, &adj_end);
if (adj_end <= adj_start)
/drivers/video/drm/drm_modes.c
504,7 → 504,75
}
EXPORT_SYMBOL(drm_gtf_mode);
 
#if IS_ENABLED(CONFIG_VIDEOMODE)
int drm_display_mode_from_videomode(const struct videomode *vm,
struct drm_display_mode *dmode)
{
dmode->hdisplay = vm->hactive;
dmode->hsync_start = dmode->hdisplay + vm->hfront_porch;
dmode->hsync_end = dmode->hsync_start + vm->hsync_len;
dmode->htotal = dmode->hsync_end + vm->hback_porch;
 
dmode->vdisplay = vm->vactive;
dmode->vsync_start = dmode->vdisplay + vm->vfront_porch;
dmode->vsync_end = dmode->vsync_start + vm->vsync_len;
dmode->vtotal = dmode->vsync_end + vm->vback_porch;
 
dmode->clock = vm->pixelclock / 1000;
 
dmode->flags = 0;
if (vm->dmt_flags & VESA_DMT_HSYNC_HIGH)
dmode->flags |= DRM_MODE_FLAG_PHSYNC;
else if (vm->dmt_flags & VESA_DMT_HSYNC_LOW)
dmode->flags |= DRM_MODE_FLAG_NHSYNC;
if (vm->dmt_flags & VESA_DMT_VSYNC_HIGH)
dmode->flags |= DRM_MODE_FLAG_PVSYNC;
else if (vm->dmt_flags & VESA_DMT_VSYNC_LOW)
dmode->flags |= DRM_MODE_FLAG_NVSYNC;
if (vm->data_flags & DISPLAY_FLAGS_INTERLACED)
dmode->flags |= DRM_MODE_FLAG_INTERLACE;
if (vm->data_flags & DISPLAY_FLAGS_DOUBLESCAN)
dmode->flags |= DRM_MODE_FLAG_DBLSCAN;
drm_mode_set_name(dmode);
 
return 0;
}
EXPORT_SYMBOL_GPL(drm_display_mode_from_videomode);
#endif
 
#if IS_ENABLED(CONFIG_OF_VIDEOMODE)
/**
* of_get_drm_display_mode - get a drm_display_mode from devicetree
* @np: device_node with the timing specification
* @dmode: will be set to the return value
* @index: index into the list of display timings in devicetree
*
* This function is expensive and should only be used if only one mode is to be
* read from DT. To get multiple modes start with of_get_display_timings and
* work with that instead.
*/
int of_get_drm_display_mode(struct device_node *np,
struct drm_display_mode *dmode, int index)
{
struct videomode vm;
int ret;
 
ret = of_get_videomode(np, &vm, index);
if (ret)
return ret;
 
drm_display_mode_from_videomode(&vm, dmode);
 
pr_debug("%s: got %dx%d display mode from %s\n",
of_node_full_name(np), vm.hactive, vm.vactive, np->name);
drm_mode_debug_printmodeline(dmode);
 
return 0;
}
EXPORT_SYMBOL_GPL(of_get_drm_display_mode);
#endif
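A short usage sketch, assuming CONFIG_OF_VIDEOMODE is enabled and the caller already holds a device_node for the panel (how that node is located is outside this diff):

static int my_get_panel_mode(struct device_node *panel_np,
			     struct drm_display_mode *mode)
{
	int ret;

	/* Pull the first timing entry described in the devicetree node. */
	ret = of_get_drm_display_mode(panel_np, mode, 0);
	if (ret)
		return ret;

	drm_mode_debug_printmodeline(mode);	/* log what was parsed */
	return 0;
}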
 
/**
* drm_mode_set_name - set the name on a mode
* @mode: name will be set in this mode
*
/drivers/video/drm/drm_pci.c
88,7 → 88,6
int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *mask)
{
struct pci_dev *root;
int pos;
u32 lnkcap, lnkcap2;
 
*mask = 0;
103,22 → 102,15
#if 0
root = dev->pdev->bus->self;
 
pos = pci_pcie_cap(root);
if (!pos)
/* we've been informed via and serverworks don't make the cut */
if (root->vendor == PCI_VENDOR_ID_VIA ||
root->vendor == PCI_VENDOR_ID_SERVERWORKS)
return -EINVAL;
 
/* we've been informed via and serverworks don't make the cut */
// if (root->vendor == PCI_VENDOR_ID_VIA ||
// root->vendor == PCI_VENDOR_ID_SERVERWORKS)
// return -EINVAL;
pcie_capability_read_dword(root, PCI_EXP_LNKCAP, &lnkcap);
pcie_capability_read_dword(root, PCI_EXP_LNKCAP2, &lnkcap2);
 
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP, &lnkcap);
pci_read_config_dword(root, pos + PCI_EXP_LNKCAP2, &lnkcap2);
 
lnkcap &= PCI_EXP_LNKCAP_SLS;
lnkcap2 &= 0xfe;
 
if (lnkcap2) { /* PCIE GEN 3.0 */
if (lnkcap2) { /* PCIe r3.0-compliant */
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
125,11 → 117,11
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
*mask |= DRM_PCIE_SPEED_80;
} else {
if (lnkcap & 1)
} else { /* pre-r3.0 */
if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
*mask |= DRM_PCIE_SPEED_25;
if (lnkcap & 2)
*mask |= DRM_PCIE_SPEED_50;
if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
*mask |= (DRM_PCIE_SPEED_25 | DRM_PCIE_SPEED_50);
}
 
DRM_INFO("probing gen 2 caps for device %x:%x = %x/%x\n", root->vendor, root->device, lnkcap, lnkcap2);
/drivers/video/drm/i915/sna/sna_stream.c
File deleted
/drivers/video/drm/i915/sna/gen6_render.c
File deleted
/drivers/video/drm/i915/sna/compiler.h
File deleted
/drivers/video/drm/i915/sna/sna.c
File deleted
/drivers/video/drm/i915/sna/sna_reg.h
File deleted
/drivers/video/drm/i915/sna/gen6_render.h
File deleted
/drivers/video/drm/i915/sna/kgem.c
File deleted
/drivers/video/drm/i915/sna/sna_render.h
File deleted
/drivers/video/drm/i915/sna/sna.h
File deleted
/drivers/video/drm/i915/sna/kgem.h
File deleted
/drivers/video/drm/i915/sna
Property changes:
Deleted: bugtraq:number
-true
\ No newline at end of property
/drivers/video/drm/i915/render/exa_wm_src_sample_argb.g6b
File deleted
/drivers/video/drm/i915/render/exa_wm_mask_affine.g6b
File deleted
/drivers/video/drm/i915/render/exa_wm_write.g6b
File deleted
/drivers/video/drm/i915/render/exa_wm_mask_sample_a.g6b
File deleted
/drivers/video/drm/i915/render/exa_wm_src_affine.g6b
File deleted
/drivers/video/drm/i915/render/exa_wm_noca.g6b
File deleted
/drivers/video/drm/i915/render/exa_wm_src_projective.g6b
File deleted
/drivers/video/drm/i915/render
Property changes:
Deleted: bugtraq:number
-true
\ No newline at end of property
/drivers/video/drm/i915/Gtt/intel-gtt.c
15,6 → 15,8
* /fairy-tale-mode off
*/
 
#include <syscall.h>
 
#include <linux/module.h>
#include <errno-base.h>
#include <linux/pci.h>
30,7 → 32,6
#include "intel-agp.h"
#include <drm/intel-gtt.h>
 
#include <syscall.h>
 
struct pci_dev *
pci_get_device(unsigned int vendor, unsigned int device, struct pci_dev *from);
86,7 → 87,6
};
 
static struct _intel_private {
struct intel_gtt base;
const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
101,7 → 101,18
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
phys_addr_t scratch_page_dma;
int refcount;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
phys_addr_t gma_bus_addr;
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
* this is not the full gtt. */
unsigned int gtt_mappable_entries;
} intel_private;
 
#define INTEL_GTT_GEN intel_private.driver->gen
118,7 → 129,7
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
intel_private.base.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page_dma = page_to_phys(page);
 
intel_private.scratch_page = page;
 
300,7 → 311,7
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
return intel_private.base.gtt_mappable_entries;
return intel_private.gtt_mappable_entries;
}
}
 
362,8 → 373,8
if (ret != 0)
return ret;
 
intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.base.gtt_total_entries = intel_gtt_total_entries();
intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.gtt_total_entries = intel_gtt_total_entries();
 
/* save the PGETBL reg for resume */
intel_private.PGETBL_save =
375,10 → 386,10
 
dev_info(&intel_private.bridge_dev->dev,
"detected gtt size: %dK total, %dK mappable\n",
intel_private.base.gtt_total_entries * 4,
intel_private.base.gtt_mappable_entries * 4);
intel_private.gtt_total_entries * 4,
intel_private.gtt_mappable_entries * 4);
 
gtt_map_size = intel_private.base.gtt_total_entries * 4;
gtt_map_size = intel_private.gtt_total_entries * 4;
 
intel_private.gtt = NULL;
if (intel_private.gtt == NULL)
389,13 → 400,12
iounmap(intel_private.registers);
return -ENOMEM;
}
intel_private.base.gtt = intel_private.gtt;
 
asm volatile("wbinvd");
 
intel_private.base.stolen_size = intel_gtt_stolen_size();
intel_private.stolen_size = intel_gtt_stolen_size();
 
intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
 
ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
410,8 → 420,9
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
 
intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
 
 
return 0;
}
 
528,7 → 539,7
unsigned int i;
 
for (i = first_entry; i < (first_entry + num_entries); i++) {
intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
594,25 → 605,6
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
 
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;
#endif
return 0;
}
 
static int i9xx_setup(void)
{
u32 reg_addr, gtt_addr;
640,9 → 632,6
break;
}
 
if (needs_idle_maps())
intel_private.base.do_idle_maps = 1;
 
intel_i9xx_setup_flush();
 
return 0;
794,8 → 783,18
struct agp_bridge_data *bridge)
{
int i, mask;
intel_private.driver = NULL;
 
/*
* Can be called from the fake agp driver but also directly from
* drm/i915.ko. Hence we need to check whether everything is set up
* already.
*/
if (intel_private.driver) {
intel_private.refcount++;
return 1;
}
 
 
for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
intel_private.driver =
807,6 → 806,8
if (!intel_private.driver)
return 0;
 
intel_private.refcount++;
 
if (bridge) {
bridge->dev_private_data = &intel_private;
bridge->dev = bridge_pdev;
834,9 → 835,13
}
EXPORT_SYMBOL(intel_gmch_probe);
 
struct intel_gtt *intel_gtt_get(void)
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size,
phys_addr_t *mappable_base, unsigned long *mappable_end)
{
return &intel_private.base;
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*stolen_size = intel_private.stolen_size;
*mappable_base = intel_private.gma_bus_addr;
*mappable_end = intel_private.gtt_mappable_entries << PAGE_SHIFT;
}
EXPORT_SYMBOL(intel_gtt_get);
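With the new signature above, a caller (the drm/i915 GTT setup, for instance) queries the layout roughly as below; the sketch only mirrors the out-parameters shown in this hunk:

size_t gtt_total, stolen_size;
phys_addr_t mappable_base;
unsigned long mappable_end;

intel_gtt_get(&gtt_total, &stolen_size, &mappable_base, &mappable_end);
/* gtt_total and mappable_end come back in bytes (entries << PAGE_SHIFT);
 * mappable_base is the physical address of the GMADR aperture. */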
 
/drivers/video/drm/i915/i915_dma.c
997,6 → 997,12
case I915_PARAM_HAS_PINNED_BATCHES:
value = 1;
break;
case I915_PARAM_HAS_EXEC_NO_RELOC:
value = 1;
break;
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
value = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
1051,54 → 1057,7
#endif
 
 
static int i915_set_status_page(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
 
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
 
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
WARN(1, "tried to set status page when mode setting active\n");
return 0;
}
 
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
ring = LP_RING(dev_priv);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
dev_priv->dri1.gfx_hws_cpu_addr =
ioremap(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
return -ENOMEM;
}
 
memset(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
 
DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
ring->status_page.gfx_addr);
DRM_DEBUG_DRIVER("load hws at %p\n",
ring->status_page.page_addr);
return 0;
}
 
static int i915_get_bridge_dev(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1200,18 → 1159,21
if (ret)
goto cleanup_vga_switcheroo;
 
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem_stolen;
 
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
intel_modeset_init(dev);
 
ret = i915_gem_init(dev);
if (ret)
goto cleanup_gem_stolen;
goto cleanup_irq;
 
 
intel_modeset_gem_init(dev);
 
ret = drm_irq_install(dev);
if (ret)
goto cleanup_gem;
 
/* Always safe in the mode setting case. */
/* FIXME: do pre/post-mode set stuff in core KMS code */
dev->vblank_disable_allowed = 1;
1218,22 → 1180,40
 
ret = intel_fbdev_init(dev);
if (ret)
goto cleanup_irq;
goto cleanup_gem;
 
// drm_kms_helper_poll_init(dev);
/* Only enable hotplug handling once the fbdev is fully set up. */
intel_hpd_init(dev);
 
/*
* Some ports require correctly set-up hpd registers for detection to
* work properly (leading to ghost connected connector status), e.g. VGA
* on gm45. Hence we can only set up the initial fbdev config after hpd
* irqs are fully enabled. Now we should scan for the initial config
* only once hotplug handling is enabled, but due to screwed-up locking
* around kms/fbdev init we can't protect the fbdev initial config
* scanning against hotplug events. Hence do this first and ignore the
* tiny window where we will lose hotplug notifications.
*/
intel_fbdev_initial_config(dev);
 
/* Only enable hotplug handling once the fbdev is fully set up. */
dev_priv->enable_hotplug_processing = true;
 
drm_kms_helper_poll_init(dev);
 
/* We're off and running w/KMS */
dev_priv->mm.suspended = 0;
 
return 0;
 
cleanup_gem:
mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_irq:
// drm_irq_uninstall(dev);
cleanup_gem:
// mutex_lock(&dev->struct_mutex);
// i915_gem_cleanup_ringbuffer(dev);
// mutex_unlock(&dev->struct_mutex);
// i915_gem_cleanup_aliasing_ppgtt(dev);
cleanup_gem_stolen:
// i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
1336,8 → 1316,7
goto put_gmch;
}
 
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
aperture_size = dev_priv->gtt.mappable_end;
 
 
 
1389,11 → 1368,12
*/
 
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
mutex_init(&dev_priv->dpio_lock);
 
mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
1444,7 → 1424,7
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
// intel_gmch_remove();
// dev_priv->gtt.gtt_remove(dev);
put_bridge:
// pci_dev_put(dev_priv->bridge_dev);
free_priv:
1476,11 → 1456,11
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
 
io_mapping_free(dev_priv->mm.gtt_mapping);
io_mapping_free(dev_priv->gtt.mappable);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
dev_priv->mm.gtt_base_addr,
dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
dev_priv->gtt.mappable_base,
dev_priv->gtt.mappable_end);
dev_priv->mm.gtt_mtrr = -1;
}
 
1506,8 → 1486,8
}
 
/* Free error state after interrupts are fully disabled. */
del_timer_sync(&dev_priv->hangcheck_timer);
cancel_work_sync(&dev_priv->error_work);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
 
if (dev->pdev->msi_enabled)
1526,10 → 1506,7
mutex_unlock(&dev->struct_mutex);
i915_gem_cleanup_aliasing_ppgtt(dev);
i915_gem_cleanup_stolen(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
 
intel_cleanup_overlay(dev);
 
if (!I915_NEED_GFX_HWS(dev))
i915_free_hws(dev);
}
1541,7 → 1518,11
intel_teardown_mchbar(dev);
 
destroy_workqueue(dev_priv->wq);
pm_qos_remove_request(&dev_priv->pm_qos);
 
if (dev_priv->slab)
kmem_cache_destroy(dev_priv->slab);
 
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
 
/drivers/video/drm/i915/i915_drv.c
52,26 → 52,30
struct drm_file *drm_file_handlers[256];
 
static int i915_modeset __read_mostly = 1;
module_param_named(modeset, i915_modeset, int, 0400);
MODULE_PARM_DESC(modeset,
"Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, "
"1=on, -1=force vga console preference [default])");
 
 
int i915_panel_ignore_lid __read_mostly = 0;
int i915_panel_ignore_lid __read_mostly = 1;
module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
MODULE_PARM_DESC(panel_ignore_lid,
"Override lid status (0=autodetect [default], 1=lid open, "
"-1=lid closed)");
"Override lid status (0=autodetect, 1=autodetect disabled [default], "
"-1=force lid closed, -2=force lid open)");
 
unsigned int i915_powersave __read_mostly = 0;
module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
 
int i915_semaphores __read_mostly = -1;
 
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
 
int i915_enable_rc6 __read_mostly = 0;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6. "
"Different stages can be selected via bitmask values "
80,34 → 84,41
"default: -1 (use per-chip default)");
 
int i915_enable_fbc __read_mostly = 0;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
MODULE_PARM_DESC(i915_enable_fbc,
"Enable frame buffer compression for power savings "
"(default: -1 (use per-chip default))");
 
unsigned int i915_lvds_downclock __read_mostly = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
MODULE_PARM_DESC(lvds_downclock,
"Use panel (LVDS/eDP) downclocking for power savings "
"(default: false)");
 
int i915_lvds_channel_mode __read_mostly;
module_param_named(lvds_channel_mode, i915_lvds_channel_mode, int, 0600);
MODULE_PARM_DESC(lvds_channel_mode,
"Specify LVDS channel mode "
"(0=probe BIOS [default], 1=single-channel, 2=dual-channel)");
 
int i915_panel_use_ssc __read_mostly = -1;
module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
MODULE_PARM_DESC(lvds_use_ssc,
"Use Spread Spectrum Clock with panels [LVDS/eDP] "
"(default: auto from VBT)");
 
int i915_vbt_sdvo_panel_type __read_mostly = -1;
module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
MODULE_PARM_DESC(vbt_sdvo_panel_type,
"Override/Ignore selection of SDVO panel mode in the VBT "
"(-2=ignore, -1=auto [default], index in VBT BIOS table)");
 
static bool i915_try_reset __read_mostly = true;
module_param_named(reset, i915_try_reset, bool, 0600);
MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)");
 
bool i915_enable_hangcheck __read_mostly = false;
module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644);
MODULE_PARM_DESC(enable_hangcheck,
"Periodically check GPU activity for detecting hangs. "
"WARNING: Disabling this can cause system wide hangs. "
114,10 → 125,12
"(default: true)");
 
int i915_enable_ppgtt __read_mostly = false;
module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600);
MODULE_PARM_DESC(i915_enable_ppgtt,
"Enable PPGTT (default: true)");
 
unsigned int i915_preliminary_hw_support __read_mostly = true;
module_param_named(preliminary_hw_support, i915_preliminary_hw_support, int, 0600);
MODULE_PARM_DESC(preliminary_hw_support,
"Enable preliminary hardware support. "
"Enable Haswell and ValleyView Support. "
254,6 → 267,7
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
};
 
static const struct intel_device_info intel_valleyview_d_info = {
263,6 → 277,7
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
};
 
static const struct intel_device_info intel_haswell_d_info = {
350,15 → 365,15
INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */
INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */
INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */
INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */
INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */
INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info),
INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info),
454,7 → 469,7
if( unlikely(ent == NULL) )
{
dbgprintf("device not found\n");
return 0;
return -ENODEV;
};
 
struct intel_device_info *intel_info =
730,8 → 745,6
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
757,11 → 770,7
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
write##y(val, dev_priv->regs + reg); \
} \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \
/drivers/video/drm/i915/i915_drv.h
30,6 → 30,8
#ifndef _I915_DRV_H_
#define _I915_DRV_H_
 
#include <uapi/drm/i915_drm.h>
 
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
96,7 → 98,12
};
#define port_name(p) ((p) + 'A')
 
#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
#define I915_GEM_GPU_DOMAINS \
(I915_GEM_DOMAIN_RENDER | \
I915_GEM_DOMAIN_SAMPLER | \
I915_GEM_DOMAIN_COMMAND | \
I915_GEM_DOMAIN_INSTRUCTION | \
I915_GEM_DOMAIN_VERTEX)
 
#define for_each_pipe(p) for ((p) = 0; (p) < dev_priv->num_pipe; (p)++)
 
114,6 → 121,19
};
#define I915_NUM_PLLS 2
 
/* Used by dp and fdi links */
struct intel_link_m_n {
uint32_t tu;
uint32_t gmch_m;
uint32_t gmch_n;
uint32_t link_m;
uint32_t link_n;
};
 
void intel_link_compute_m_n(int bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n);
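For orientation, the ratios this helper is expected to encode are the conventional DisplayPort/FDI data and link M/N definitions; they are an assumption stated here for clarity, not something this diff spells out:

/* Assumed (standard DP) definitions, illustrative numbers only:
 *
 *   data M/N = (bpp * pixel_clock) / (8 * nlanes * link_clock)
 *   link M/N = pixel_clock / link_clock
 *
 * e.g. 1920x1080@60 (148500 kHz pixel clock), 24 bpp over 4 lanes at a
 * 270000 kHz link clock: data M/N = 3564000 / 8640000 = 0.4125. */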
 
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
143,8 → 163,13
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
 
struct drm_i915_gem_phys_object {
int id;
struct page **page_list;
drm_dma_handle_t *handle;
struct drm_i915_gem_object *cur_obj;
};
 
 
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
287,6 → 312,7
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*hpd_irq_setup)(struct drm_device *dev);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
326,6 → 352,7
DEV_INFO_FLAG(has_llc)
 
struct intel_device_info {
u32 display_mmio_offset;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
353,6 → 380,50
u8 has_llc:1;
};
 
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
 
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
* portion of the GTT which can be mapped by the CPU and remain both coherent
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
struct i915_gtt {
unsigned long start; /* Start offset of used GTT */
size_t total; /* Total size GTT can map */
size_t stolen_size; /* Total size of stolen memory */
 
unsigned long mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
 
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
 
bool do_idle_maps;
dma_addr_t scratch_page_dma;
struct page *scratch_page;
 
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen, phys_addr_t *mappable_base,
unsigned long *mappable_end);
void (*gtt_remove)(struct drm_device *dev);
void (*gtt_clear_range)(struct drm_device *dev,
unsigned int first_entry,
unsigned int num_entries);
void (*gtt_insert_entries)(struct drm_device *dev,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
};
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
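A short sketch of how the new i915_gtt bookkeeping reads; the relationships follow directly from the comments above, and "offset" below is a hypothetical GTT offset:

/* Illustrative only: total and mappable_end are sizes in bytes, so entry
 * counts are recovered with gtt_total_entries(); the CPU-visible (GMADR)
 * window is the first mappable_end bytes starting at mappable_base. */
unsigned int num_entries = gtt_total_entries(dev_priv->gtt);
bool cpu_mappable = (offset < dev_priv->gtt.mappable_end);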
 
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
362,6 → 433,16
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
 
/* pte functions, mirroring the interface of the global gtt. */
void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
unsigned int first_entry,
unsigned int num_entries);
void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
 
 
588,6 → 669,9
struct mutex hw_lock;
};
 
/* defined intel_pm.c */
extern spinlock_t mchdev_lock;
 
struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
628,6 → 712,158
struct work_struct error_work;
};
 
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
* are idle and not used by the GPU) but still have
* (presumably uncached) pages still attached.
*/
struct list_head unbound_list;
 
/** Usable portion of the GTT for GEM */
unsigned long stolen_base; /* limited to low memory (32-bit) */
 
int gtt_mtrr;
 
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
 
bool shrinker_no_lock_stealing;
 
/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
 
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;
 
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
 
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;
 
/**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;
 
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int suspended;
 
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;
 
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
/* accounting, useful for userland debugging */
size_t object_memory;
u32 object_count;
};
 
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
struct work_struct work;
 
unsigned long last_reset;
 
/**
* State variable and reset counter controlling the reset flow
*
* Upper bits are for the reset counter. This counter is used by the
* wait_seqno code to notice, race-free, that a reset event happened and
* that it needs to restart the entire ioctl (since most likely the
* seqno it waited for won't ever signal anytime soon).
*
* This is important for lock-free wait paths, where no contended lock
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
*
* Lowest bit controls the reset state machine: Set means a reset is in
* progress. This state will (presuming we don't have any bugs) decay
* into either unset (successful reset) or the special WEDGED value (hw
* terminally sour). All waiters on the reset_queue will be woken when
* that happens.
*/
atomic_t reset_counter;
 
/**
* Special values/flags for reset_counter
*
* Note that the code relies on
* I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
* being true.
*/
#define I915_RESET_IN_PROGRESS_FLAG 1
#define I915_WEDGED 0xffffffff
 
/**
* Waitqueue to signal when the reset has completed. Used by clients
* that wait for dev_priv->mm.wedged to settle.
*/
wait_queue_head_t reset_queue;
 
/* For gpu hang simulation. */
unsigned int stop_rings;
};
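The lock-free usage the comment above describes shows up later in this same revision (see __wait_seqno and its callers in i915_gem.c); condensed, the pattern is:

/* Sketch of the documented pattern, not new driver code: sample the counter
 * while the seqno is still protected, drop the lock, then wait; the wait
 * bails out with -EAGAIN if the counter moved (i.e. a reset happened). */
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);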
 
enum modeset_restore {
MODESET_ON_LID_OPEN,
MODESET_DONE,
MODESET_SUSPENDED,
};
 
typedef struct drm_i915_private {
struct drm_device *dev;
 
644,10 → 880,11
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
spinlock_t gt_lock;
 
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
 
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
657,9 → 894,11
*/
uint32_t gpio_mmio_base;
 
wait_queue_head_t gmbus_wait_queue;
 
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
uint32_t last_seqno, next_seqno;
 
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
669,31 → 908,24
/* protects the irq masks */
spinlock_t irq_lock;
 
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
// struct pm_qos_request pm_qos;
 
/* DPIO indirect register protection */
spinlock_t dpio_lock;
struct mutex dpio_lock;
 
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
 
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
bool enable_hotplug_processing;
 
int num_pipe;
int num_pch_pll;
 
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
unsigned int stop_rings;
 
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
704,7 → 936,7
 
/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
unsigned int sprite_scaling_enabled;
 
/* LVDS info */
int backlight_level; /* restore backlight to this value */
721,7 → 953,6
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
742,11 → 973,6
 
unsigned int fsb_freq, mem_freq, is_ddr3;
 
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
 
/* Display functions */
758,116 → 984,13
 
unsigned long quirks;
 
/* Register state */
bool modeset_on_lid;
enum modeset_restore modeset_restore;
struct mutex modeset_restore_lock;
 
struct {
/** Bridge to intel-gtt-ko */
struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
/** List of all objects in gtt_space. Used to restore gtt
* mappings on resume */
struct list_head bound_list;
/**
* List of objects which are not bound to the GTT (thus
* are idle and not used by the GPU) but still have
* (presumably uncached) pages still attached.
*/
struct list_head unbound_list;
struct i915_gtt gtt;
 
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
unsigned long gtt_mappable_end;
unsigned long gtt_end;
struct i915_gem_mm mm;
 
// struct io_mapping *gtt_mapping;
phys_addr_t gtt_base_addr;
int gtt_mtrr;
 
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
 
// struct shrinker inactive_shrinker;
bool shrinker_no_lock_stealing;
 
/**
* List of objects currently involved in rendering.
*
* Includes buffers having the contents of their GPU caches
* flushed, not necessarily primitives. last_rendering_seqno
* represents when the rendering involved will be completed.
*
* A reference is held on the buffer while on this list.
*/
struct list_head active_list;
 
/**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
* last_rendering_seqno is 0 while an object is in this list.
*
* A reference is not held on the buffer while on this list,
* as merely being GTT-bound shouldn't prevent its being
* freed, and we'll pull it off the list in the free path.
*/
struct list_head inactive_list;
 
/** LRU list of objects with fence regs on them. */
struct list_head fence_list;
 
/**
* We leave the user IRQ off as much as possible,
* but this means that requests will finish and never
* be retired once the system goes idle. Set a timer to
* fire periodically while the ring is running. When it
* fires, go retire requests.
*/
struct delayed_work retire_work;
 
/**
* Are we in a non-interruptible section of code like
* modesetting?
*/
bool interruptible;
 
/**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
* This is set between LeaveVT and EnterVT. It needs to be
* replaced with a semaphore. It also needs to be
* transitioned away from for kernel modesetting.
*/
int suspended;
 
/**
* Flag if the hardware appears to be wedged.
*
* This is set when attempts to idle the device timeout.
* It prevents command submission from occurring and makes
* every pending request fail
*/
atomic_t wedged;
 
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
uint32_t bit_6_swizzle_y;
 
/* storage for physical objects */
// struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
/* accounting, useful for userland debugging */
size_t gtt_total;
size_t mappable_gtt_total;
size_t object_memory;
u32 object_count;
} mm;
 
/* Kernel Modesetting */
 
struct sdvo_device_mapping sdvo_mappings[2];
908,7 → 1031,7
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
 
unsigned long last_gpu_reset;
struct i915_gpu_error gpu_error;
 
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
927,7 → 1050,7
bool hw_contexts_disabled;
uint32_t hw_context_size;
 
bool fdi_rx_polarity_reversed;
u32 fdi_rx_config;
 
struct i915_suspend_saved_registers regfile;
 
948,11 → 1071,7
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
 
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
 
struct drm_i915_gem_object_ops {
/* Interface between the GEM object and its backing storage.
977,10 → 1096,10
 
const struct drm_i915_gem_object_ops *ops;
 
// void *mapped;
 
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
/** Stolen memory for this object, instead of being backed by shmem. */
struct drm_mm_node *stolen;
struct list_head gtt_list;
 
/** This object's place on the active/inactive lists */
1065,7 → 1184,6
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
 
// dma_addr_t *allocated_pages;
struct sg_table *pages;
int pages_pin_count;
 
1107,13 → 1225,6
 
/** for phy allocated objects */
struct drm_i915_gem_phys_object *phys_obj;
 
/**
* Number of crtcs where this object is currently the fb, but
* will be page flipped away on the next vblank. When it
* reaches 0, dev_priv->pending_flip_queue will be woken up.
*/
atomic_t pending_flip;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
1152,7 → 1263,7
 
struct drm_i915_file_private {
struct {
struct spinlock lock;
spinlock_t lock;
struct list_head request_list;
} mm;
struct idr context_idr;
1238,6 → 1349,8
 
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
#define HAS_DDI(dev) (IS_HASWELL(dev))
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
1293,6 → 1406,7
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
 
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
1329,6 → 1443,7
void i915_handle_error(struct drm_device *dev, bool wedged);
 
extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
 
1397,6 → 1512,8
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
1403,6 → 1520,7
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
 
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
uint32_t alignment,
bool map_and_fenceable,
1409,6 → 1527,7
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
 
1460,8 → 1579,8
return (int32_t)(seq1 - seq2) >= 0;
}
 
extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
1487,9 → 1606,19
 
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
& I915_RESET_IN_PROGRESS_FLAG);
}
 
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter) == I915_WEDGED;
}
 
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1529,9 → 1658,10
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
uint32_t size,
int tiling_mode);
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
int tiling_mode, bool fenced);
 
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
1552,7 → 1682,6
struct drm_file *file);
 
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
1566,12 → 1695,10
enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_gtt_fini(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
1589,9 → 1716,22
 
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 
/* i915_gem_tiling.c */
inline static bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
}
 
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
1617,9 → 1757,9
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
 
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);
 
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
1676,6 → 1816,7
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1748,6 → 1889,21
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
 
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
 
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
if (HAS_PCH_SPLIT(dev))
return CPU_VGACNTRL;
else if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
else
return VGACNTRL;
}
 
typedef struct
{
int width;
/drivers/video/drm/i915/i915_gem.c
118,14 → 118,12
}
 
static int
i915_gem_wait_for_error(struct drm_device *dev)
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct completion *x = &dev_priv->error_completion;
unsigned long flags;
int ret;
 
if (!atomic_read(&dev_priv->mm.wedged))
#define EXIT_COND (!i915_reset_in_progress(error))
if (EXIT_COND)
return 0;
#if 0
/*
133,7 → 131,9
* userspace. If it takes that long something really bad is going on and
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
ret = wait_event_interruptible_timeout(error->reset_queue,
EXIT_COND,
10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
141,17 → 141,8
return ret;
}
 
if (atomic_read(&dev_priv->mm.wedged)) {
/* GPU is hung, bump the completion count to account for
* the token we just consumed so that we never hit zero and
* end up waiting upon a subsequent completion event that
* will never happen.
*/
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
spin_unlock_irqrestore(&x->wait.lock, flags);
}
#endif
#undef EXIT_COND
 
return 0;
}
158,13 → 149,16
 
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ret = i915_gem_wait_for_error(dev);
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
 
mutex_lock(&dev->struct_mutex);
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
 
WARN_ON(i915_verify_lists(dev));
return 0;
183,6 → 177,7
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
197,8 → 192,9
return -ENODEV;
 
mutex_lock(&dev->struct_mutex);
i915_gem_init_global_gtt(dev, args->gtt_start,
args->gtt_end, args->gtt_end);
i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
args->gtt_end);
dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
 
return 0;
221,12 → 217,24
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
 
args->aper_size = dev_priv->mm.gtt_total;
args->aper_size = dev_priv->gtt.total;
args->aper_available_size = args->aper_size - pinned;
 
return 0;
}
 
void *i915_gem_object_alloc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return kmalloc(sizeof(struct drm_i915_gem_object), 0);
}
 
void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
kfree(obj);
}
 
static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
297,13 → 305,7
args->size, &args->handle);
}
 
static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
obj->tiling_mode != I915_TILING_NONE;
}
#if 0
 
static inline int
446,7 → 448,6
loff_t offset;
int shmem_page_offset, page_length, ret = 0;
int obj_do_bit17_swizzling, page_do_bit17_swizzling;
int hit_slowpath = 0;
int prefaulted = 0;
int needs_clflush = 0;
struct scatterlist *sg;
508,7 → 509,6
if (ret == 0)
goto next_page;
 
hit_slowpath = 1;
mutex_unlock(&dev->struct_mutex);
 
if (!prefaulted) {
541,12 → 541,6
out:
i915_gem_object_unpin_pages(obj);
 
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
i915_gem_object_truncate(obj);
}
 
return ret;
}
 
888,12 → 882,13
i915_gem_object_unpin_pages(obj);
 
if (hit_slowpath) {
/* Fixup: Kill any reinstated backing storage pages */
if (obj->madv == __I915_MADV_PURGED)
i915_gem_object_truncate(obj);
/* and flush dirty cachelines in case the object isn't in the cpu write
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
/*
* Fixup: Flush cpu caches in case we didn't flush the dirty
* cachelines in-line while writing and the object moved
* out of the cpu write domain while we've dropped the lock.
*/
if (!needs_clflush_after &&
obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
i915_gem_chipset_flush(dev);
}
918,6 → 913,12
struct drm_i915_gem_object *obj;
int ret;
 
if(args->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, args->handle);
return 0;
}
 
if (args->size == 0)
return 0;
 
980,26 → 981,17
}
 
int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
{
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
bool recovery_complete;
unsigned long flags;
 
/* Give the error handler a chance to run. */
spin_lock_irqsave(&x->wait.lock, flags);
recovery_complete = x->done > 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
 
if (i915_reset_in_progress(error)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
 
/* Recovery complete, but still wedged means reset failure. */
if (recovery_complete)
/* Recovery complete, but the reset failed ... */
if (i915_terminally_wedged(error))
return -EIO;
 
return -EAGAIN;
1030,13 → 1022,22
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
* @reset_counter: reset sequence associated with the given seqno
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
* Note: It is of utmost importance that the passed in seqno and reset_counter
* values have been read by the caller in an smp safe manner. Where read-side
* locks are involved, it is sufficient to read the reset_counter before
* unlocking the lock that protects the seqno. For lockless tricks, the
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
* inserted.
*
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
unsigned reset_counter,
bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
1066,7 → 1067,8
 
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
i915_reset_in_progress(&dev_priv->gpu_error) || \
reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
1076,7 → 1078,14
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
 
ret = i915_gem_check_wedge(dev_priv, interruptible);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
end = -EAGAIN;
 
/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
* gone. */
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
1122,7 → 1131,7
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
 
ret = i915_gem_check_wedge(dev_priv, interruptible);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
 
1130,7 → 1139,9
if (ret)
return ret;
 
return __wait_seqno(ring, seqno, interruptible, NULL);
return __wait_seqno(ring, seqno,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL);
}
 
/**
1177,6 → 1188,7
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = obj->ring;
unsigned reset_counter;
u32 seqno;
int ret;
 
1187,7 → 1199,7
if (seqno == 0)
return 0;
 
ret = i915_gem_check_wedge(dev_priv, true);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
 
1195,8 → 1207,9
if (ret)
return ret;
 
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, true, NULL);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
mutex_lock(&dev->struct_mutex);
 
i915_gem_retire_requests_ring(ring);
1227,6 → 1240,13
uint32_t write_domain = args->write_domain;
int ret;
 
 
if(args->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, args->handle);
return 0;
}
 
/* Only handle setting domains to types used by the CPU. */
if (write_domain & I915_GEM_GPU_DOMAINS)
return -EINVAL;
1298,6 → 1318,12
struct drm_gem_object *obj;
unsigned long addr = 0;
 
if(args->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, args->handle);
return 0;
}
 
obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
1364,7 → 1390,7
obj->fault_mappable = false;
}
 
static uint32_t
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
uint32_t gtt_size;
1392,16 → 1418,15
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping.
*/
static uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev,
uint32_t size,
int tiling_mode)
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
int tiling_mode, bool fenced)
{
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
if (INTEL_INFO(dev)->gen >= 4 ||
if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
tiling_mode == I915_TILING_NONE)
return 4096;
 
1441,6 → 1466,104
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
 
int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
uint32_t handle,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
unsigned long pfn;
char *mem, *ptr;
int ret;
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
 
obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
if (&obj->base == NULL) {
ret = -ENOENT;
goto unlock;
}
 
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
}
 
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
/* Now bind it into the GTT if needed */
ret = i915_gem_object_pin(obj, 0, true, false);
if (ret)
goto out;
 
ret = i915_gem_object_set_to_gtt_domain(obj, 1);
if (ret)
goto unpin;
 
ret = i915_gem_object_get_fence(obj);
if (ret)
goto unpin;
 
obj->fault_mappable = true;
 
pfn = dev_priv->gtt.mappable_base + obj->gtt_offset;
 
/* Finally, remap it using the new GTT offset */
 
mem = UserAlloc(obj->base.size);
if(unlikely(mem == NULL))
{
ret = -ENOMEM;
goto unpin;
}
 
for(ptr = mem; ptr < mem + obj->base.size; ptr+= 4096, pfn+= 4096)
MapPage(ptr, pfn, PG_SHARED|PG_UW);
 
unpin:
i915_gem_object_unpin(obj);
 
 
*offset = (u64)mem;
 
out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
 
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
* @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
* up so we can get faults in the handler above.
*
* The fault handler will take care of binding the object into the GTT
* (since it may have been evicted to make room for something), allocating
* a fence register, and mapping the appropriate aperture address into
* userspace.
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_mmap_gtt *args = data;
 
return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
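In this port the fake-offset machinery described in the comment is short-circuited: i915_gem_mmap_gtt() above already builds a user mapping with UserAlloc()/MapPage() and returns its address through *offset. A caller can therefore use the returned value directly as a pointer; bo_handle and fb_pixels below are hypothetical names:

struct drm_i915_gem_mmap_gtt args = { .handle = bo_handle };

if (i915_gem_mmap_gtt_ioctl(dev, &args, file) == 0)
	fb_pixels = (void *)(uintptr_t)args.offset;	/* already a user pointer here */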
 
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1504,7 → 1627,7
kfree(obj->pages);
}
 
static int
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
1669,9 → 1792,6
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
 
if (obj->pin_count) /* are we a framebuffer? */
intel_mark_fb_idle(obj);
 
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
list_del_init(&obj->ring_list);
1691,30 → 1811,24
}
 
static int
i915_gem_handle_seqno_wrap(struct drm_device *dev)
i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i, j;
 
/* The hardware uses various monotonic 32-bit counters, if we
* detect that they will wraparound we need to idle the GPU
* and reset those counters.
*/
ret = 0;
/* Carefully retire all requests without writing to the rings */
for_each_ring(ring, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ret |= ring->sync_seqno[j] != 0;
}
if (ret == 0)
return ret;
 
ret = i915_gpu_idle(dev);
ret = intel_ring_idle(ring);
if (ret)
return ret;
}
i915_gem_retire_requests(dev);
 
i915_gem_retire_requests(dev);
/* Finally reset hw state */
for_each_ring(ring, dev_priv, i) {
intel_ring_init_seqno(ring, seqno);
 
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ring->sync_seqno[j] = 0;
}
1722,6 → 1836,32
return 0;
}
 
int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (seqno == 0)
return -EINVAL;
 
/* HWS page needs to be set less than what we
* will inject to ring
*/
ret = i915_gem_init_seqno(dev, seqno - 1);
if (ret)
return ret;
 
/* Carefully set the last_seqno value so that wrap
* detection still works
*/
dev_priv->next_seqno = seqno;
dev_priv->last_seqno = seqno - 1;
if (dev_priv->last_seqno == 0)
dev_priv->last_seqno--;
 
return 0;
}
 
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
1729,7 → 1869,7
 
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
int ret = i915_gem_handle_seqno_wrap(dev);
int ret = i915_gem_init_seqno(dev, 0);
if (ret)
return ret;
 
1736,7 → 1876,7
dev_priv->next_seqno = 1;
}
 
*seqno = dev_priv->next_seqno++;
*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
return 0;
}
 
2126,9 → 2266,6
{
u32 old_write_domain, old_read_domains;
 
/* Act a barrier for all accesses through the GTT */
mb();
 
/* Force a pagefault for domain tracking on next user access */
// i915_gem_release_mmap(obj);
 
2135,6 → 2272,9
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
 
/* Wait for any direct GTT access to complete */
mb();
 
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
 
2153,7 → 2293,7
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret = 0;
int ret;
 
if(obj == get_fb_obj())
return 0;
2223,37 → 2363,22
return 0;
}
 
static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
uint64_t val;
 
if (obj) {
u32 size = obj->gtt_space->size;
 
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT;
 
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
 
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
if (INTEL_INFO(dev)->gen >= 6) {
fence_reg = FENCE_REG_SANDYBRIDGE_0;
fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
} else {
fence_reg = FENCE_REG_965_0;
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
 
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint64_t val;
 
if (obj) {
u32 size = obj->gtt_space->size;
 
2260,7 → 2385,7
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
2267,8 → 2392,9
} else
val = 0;
 
I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
POSTING_READ(FENCE_REG_965_0 + reg * 8);
fence_reg += reg * 8;
I915_WRITE64(fence_reg, val);
POSTING_READ(fence_reg);
}
 
static void i915_write_fence_reg(struct drm_device *dev, int reg,
2347,18 → 2473,37
POSTING_READ(FENCE_REG_830_0 + reg * 4);
}
 
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
{
return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
}
 
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
*/
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
 
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
case 6:
case 5:
case 4: i965_write_fence_reg(dev, reg, obj); break;
case 3: i915_write_fence_reg(dev, reg, obj); break;
case 2: i830_write_fence_reg(dev, reg, obj); break;
default: break;
default: BUG();
}
 
/* And similarly be paranoid that no direct access to this region
* is reordered to before the fence is installed.
*/
if (i915_gem_object_needs_mb(obj))
mb();
}
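For reference, the merged i965/Sandybridge path above packs one 64-bit fence value: the end of the fenced range in the upper 32 bits, the start in the lower bits, the pitch in 128-byte tiles minus one at a generation-dependent shift, plus tiling and valid bits. A standalone sketch with the shifts passed in as parameters (the driver's register constants are assumptions here, not reproduced):

#include <stdint.h>

/* Illustrative mirror of the val computation in i965_write_fence_reg();
 * fence_pitch_shift differs between gen4/5 and gen6+, so it is a parameter,
 * and the tiling/valid bit positions below are assumptions. */
uint64_t i965_fence_value_sketch(uint32_t gtt_offset, uint32_t size,
                                 uint32_t stride, int tiling_y,
                                 unsigned int fence_pitch_shift)
{
    uint64_t val;

    val  = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
    val |= gtt_offset & 0xfffff000;
    val |= (uint64_t)((stride / 128) - 1) << fence_pitch_shift;
    if (tiling_y)
        val |= 1 << 1;    /* I965_FENCE_TILING_Y_SHIFT, assumed */
    val |= 1 << 0;        /* I965_FENCE_REG_VALID, assumed */

    return val;
}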
 
static inline int fence_number(struct drm_i915_private *dev_priv,
2388,7 → 2533,7
}
 
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_seqno) {
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
2398,12 → 2543,6
obj->last_fenced_seqno = 0;
}
 
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
*/
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
 
obj->fenced_gpu_access = false;
return 0;
}
2414,7 → 2553,7
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
 
ret = i915_gem_object_flush_fence(obj);
ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
 
2488,7 → 2627,7
* will need to serialise the write to the associated fence register?
*/
if (obj->fence_dirty) {
ret = i915_gem_object_flush_fence(obj);
ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
}
2509,7 → 2648,7
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
 
ret = i915_gem_object_flush_fence(old);
ret = i915_gem_object_wait_fence(old);
if (ret)
return ret;
 
2532,7 → 2671,7
 
/* On non-LLC machines we have to be careful when putting differing
* types of snoopable memory together to avoid the prefetcher
* crossing memory domains and dieing.
* crossing memory domains and dying.
*/
if (HAS_LLC(dev))
return true;
2610,21 → 2749,16
bool mappable, fenceable;
int ret;
 
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
}
 
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev,
obj->base.size,
obj->tiling_mode);
obj->tiling_mode, true);
unfenced_alignment =
i915_gem_get_unfenced_gtt_alignment(dev,
i915_gem_get_gtt_alignment(dev,
obj->base.size,
obj->tiling_mode);
obj->tiling_mode, false);
 
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
2640,7 → 2774,7
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size >
(map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
(map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
2661,7 → 2795,7
if (map_and_fenceable)
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end);
0, dev_priv->gtt.mappable_end);
else
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level);
2695,7 → 2829,7
(node->start & (fence_alignment - 1)) == 0;
 
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
 
obj->map_and_fenceable = mappable && fenceable;
 
2715,6 → 2849,13
if (obj->pages == NULL)
return;
 
/*
* Stolen memory is always coherent with the GPU as it is explicitly
* marked as wc by the system, or the system is cache-coherent.
*/
if (obj->stolen)
return;
 
/* If the GPU is snooping the contents of the CPU cache,
* we do not need to manually clear the CPU cache lines. However,
* the caches are only snooped when the render cache is
2848,6 → 2989,13
 
i915_gem_object_flush_cpu_write_domain(obj);
 
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
* GTT domain upon first access.
*/
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
 
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
 
2955,6 → 3103,12
struct drm_i915_gem_object *obj;
int ret;
 
if(args->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, args->handle);
return 0;
}
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
2981,6 → 3135,12
enum i915_cache_level level;
int ret;
 
if(args->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, args->handle);
return 0;
}
 
switch (args->caching) {
case I915_CACHING_NONE:
level = I915_CACHE_NONE;
3154,12 → 3314,18
unsigned long recent_enough = GetTimerTicks() - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;
 
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
 
ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
if (ret)
return ret;
 
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
if (time_after_eq(request->emitted_jiffies, recent_enough))
3168,12 → 3334,13
ring = request->ring;
seqno = request->seqno;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
spin_unlock(&file_priv->mm.lock);
 
if (seqno == 0)
return 0;
 
ret = __wait_seqno(ring, seqno, true, NULL);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
3247,6 → 3414,12
struct drm_i915_gem_object *obj;
int ret;
 
if(args->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, args->handle);
return 0;
}
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
3344,6 → 3517,12
if (ret)
return ret;
 
if(args->handle == -2)
{
obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
ret = -ENOENT;
3454,7 → 3633,7
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
u32 mask;
gfp_t mask;
 
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (obj == NULL)
3565,6 → 3744,10
}
i915_gem_retire_requests(dev);
 
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_gem_evict_everything(dev);
 
i915_gem_reset_fences(dev);
 
/* Hack! Don't let anybody do execbuf while we don't control the chip.
3572,7 → 3755,7
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
del_timer_sync(&dev_priv->hangcheck_timer);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
 
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
3592,7 → 3775,7
u32 misccpctl;
int i;
 
if (!IS_IVYBRIDGE(dev))
if (!HAS_L3_GPU_CACHE(dev))
return;
 
if (!dev_priv->l3_parity.remap_info)
3635,8 → 3818,10
I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
if (IS_GEN6(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (IS_GEN7(dev))
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
else
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
BUG();
}
 
static bool
3655,22 → 3840,11
return true;
}
 
int
i915_gem_init_hw(struct drm_device *dev)
static int i915_gem_init_rings(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
 
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
 
i915_gem_l3_remap(dev);
 
i915_gem_init_swizzling(dev);
 
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
3687,37 → 3861,50
goto cleanup_bsd_ring;
}
 
dev_priv->next_seqno = 1;
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
goto cleanup_blt_ring;
 
/*
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
*/
i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev);
 
return 0;
 
cleanup_blt_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
cleanup_bsd_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
 
return ret;
}
 
static bool
intel_enable_ppgtt(struct drm_device *dev)
int
i915_gem_init_hw(struct drm_device *dev)
{
if (i915_enable_ppgtt >= 0)
return i915_enable_ppgtt;
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
 
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
#endif
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
 
return true;
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
I915_WRITE(0x9008, I915_READ(0x9008) | 0xf0000);
 
i915_gem_l3_remap(dev);
 
i915_gem_init_swizzling(dev);
 
ret = i915_gem_init_rings(dev);
if (ret)
return ret;
 
/*
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
*/
i915_gem_context_init(dev);
i915_gem_init_ppgtt(dev);
 
return 0;
}
 
#define LFB_SIZE 0xC00000
3725,39 → 3912,10
int i915_gem_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
int ret;
 
gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
mutex_lock(&dev->struct_mutex);
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
 
i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);
 
ret = i915_gem_init_aliasing_ppgtt(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
} else {
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch
* page. There are a number of places where the hardware
* apparently prefetches past the end of the object, and we've
* seen multiple hangs with the GPU head pointer stuck in a
* batchbuffer bound at the last page of the aperture. One page
* should be enough to keep any prefetching inside of the
* aperture.
*/
i915_gem_init_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size - LFB_SIZE);
}
 
i915_gem_init_global_gtt(dev);
ret = i915_gem_init_hw(dev);
mutex_unlock(&dev->struct_mutex);
if (ret) {
3791,9 → 3949,9
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
 
if (atomic_read(&dev_priv->mm.wedged)) {
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
atomic_set(&dev_priv->mm.wedged, 0);
atomic_set(&dev_priv->gpu_error.reset_counter, 0);
}
 
mutex_lock(&dev->struct_mutex);
3858,8 → 4016,8
void
i915_gem_load(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
drm_i915_private_t *dev_priv = dev->dev_private;
 
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3872,6 → 4030,7
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
/drivers/video/drm/i915/i915_gem_context.c
128,13 → 128,8
 
static void do_destroy(struct i915_hw_context *ctx)
{
struct drm_device *dev = ctx->obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (ctx->file_priv)
idr_remove(&ctx->file_priv->context_idr, ctx->id);
else
BUG_ON(ctx != dev_priv->ring[RCS].default_context);
 
drm_gem_object_unreference(&ctx->obj->base);
kfree(ctx);
146,7 → 141,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_context *ctx;
int ret, id;
int ret;
 
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
171,23 → 166,12
 
ctx->file_priv = file_priv;
 
again:
if (idr_pre_get(&file_priv->context_idr, GFP_KERNEL) == 0) {
ret = -ENOMEM;
DRM_DEBUG_DRIVER("idr allocation failed\n");
ret = idr_alloc(&file_priv->context_idr, ctx, DEFAULT_CONTEXT_ID + 1, 0,
GFP_KERNEL);
if (ret < 0)
goto err_out;
}
ctx->id = ret;
 
ret = idr_get_new_above(&file_priv->context_idr, ctx,
DEFAULT_CONTEXT_ID + 1, &id);
if (ret == 0)
ctx->id = id;
 
if (ret == -EAGAIN)
goto again;
else if (ret)
goto err_out;
 
return ctx;
 
err_out:
245,11 → 229,7
void i915_gem_context_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ctx_size;
 
dev_priv->hw_contexts_disabled = true;
return;
 
#if 0
if (!HAS_HW_CONTEXTS(dev)) {
dev_priv->hw_contexts_disabled = true;
261,11 → 241,9
dev_priv->ring[RCS].default_context)
return;
 
ctx_size = get_context_size(dev);
dev_priv->hw_context_size = get_context_size(dev);
dev_priv->hw_context_size = round_up(dev_priv->hw_context_size, 4096);
dev_priv->hw_context_size = round_up(get_context_size(dev), 4096);
 
if (ctx_size <= 0 || ctx_size > (1<<20)) {
if (dev_priv->hw_context_size > (1<<20)) {
dev_priv->hw_contexts_disabled = true;
return;
}
/drivers/video/drm/i915/i915_gem_execbuffer.c
63,25 → 63,43
}
 
struct eb_objects {
struct list_head objects;
int and;
union {
struct drm_i915_gem_object *lut[0];
struct hlist_head buckets[0];
};
};
 
static struct eb_objects *
eb_create(int size)
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
struct eb_objects *eb;
struct eb_objects *eb = NULL;
 
if (args->flags & I915_EXEC_HANDLE_LUT) {
int size = args->buffer_count;
size *= sizeof(struct drm_i915_gem_object *);
size += sizeof(struct eb_objects);
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
}
 
if (eb == NULL) {
int size = args->buffer_count;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > size)
BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
GFP_KERNEL);
GFP_TEMPORARY);
if (eb == NULL)
return eb;
 
eb->and = count - 1;
} else
eb->and = -args->buffer_count;
 
INIT_LIST_HEAD(&eb->objects);
return eb;
}
 
88,36 → 106,93
static void
eb_reset(struct eb_objects *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
 
static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
static int
eb_lookup_objects(struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
const struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file)
{
int i;
 
spin_lock(&file->table_lock);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
 
if(exec[i].handle == -2)
obj = get_fb_obj();
else
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
return -ENOENT;
}
 
if (!list_empty(&obj->exec_list)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
return -EINVAL;
}
 
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->exec_list, &eb->objects);
 
obj->exec_entry = &exec[i];
if (eb->and < 0) {
eb->lut[i] = obj;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
obj->exec_handle = handle;
hlist_add_head(&obj->exec_node,
&eb->buckets[obj->exec_handle & eb->and]);
&eb->buckets[handle & eb->and]);
}
}
spin_unlock(&file->table_lock);
 
return 0;
}
 
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
if (eb->and < 0) {
if (handle >= -eb->and)
return NULL;
return eb->lut[handle];
} else {
struct hlist_head *head;
struct hlist_node *node;
struct drm_i915_gem_object *obj;
 
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct drm_i915_gem_object *obj;
 
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}
 
return NULL;
}
}
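The two lookup modes introduced above are encoded in eb->and: a negative value (minus the buffer count) selects a flat per-index LUT when userspace passes I915_EXEC_HANDLE_LUT, while a non-negative value is the mask for hash buckets keyed by the raw handle. A small standalone sketch of just that dispatch, with simplified stand-in types:

#include <stddef.h>

struct obj;                      /* stand-in for struct drm_i915_gem_object */

struct eb_table {
    int and;                     /* < 0: -buffer_count (LUT mode), >= 0: bucket mask */
    struct obj **lut;            /* index -> object, used only in LUT mode */
    /* hash buckets elided in this sketch */
};

struct obj *eb_lookup_sketch(struct eb_table *eb, unsigned long handle)
{
    if (eb->and < 0) {
        if (handle >= (unsigned long)-eb->and)
            return NULL;         /* execbuffer index out of range */
        return eb->lut[handle];
    }
    /* otherwise walk eb->buckets[handle & eb->and], as eb_get_object() does */
    return NULL;
}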
 
static void
eb_destroy(struct eb_objects *eb)
{
while (!list_empty(&eb->objects)) {
struct drm_i915_gem_object *obj;
 
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
kfree(eb);
}
 
179,17 → 254,6
reloc->write_domain);
return ret;
}
if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
reloc->write_domain != target_obj->pending_write_domain)) {
DRM_DEBUG("Write domain conflict: "
"obj %p target %d offset %d "
"new %08x old %08x\n",
obj, reloc->target_handle,
(int) reloc->offset,
reloc->write_domain,
target_obj->pending_write_domain);
return ret;
}
 
target_obj->pending_read_domains |= reloc->read_domains;
target_obj->pending_write_domain |= reloc->write_domain;
218,10 → 282,7
}
 
/* We can't wait for rendering with pagefaults disabled */
// if (obj->active && in_atomic())
// return -EFAULT;
 
 
reloc->delta += target_offset;
if (use_cpu_reloc(obj)) {
uint32_t page_offset = reloc->offset & ~PAGE_MASK;
324,8 → 385,7
 
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb,
struct list_head *objects)
struct eb_objects *eb)
{
struct drm_i915_gem_object *obj;
int ret = 0;
338,7 → 398,7
* lockdep complains vehemently.
*/
// pagefault_disable();
list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
360,7 → 420,8
 
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
struct intel_ring_buffer *ring,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
409,9 → 470,20
obj->has_aliasing_ppgtt_mapping = 1;
}
 
if (entry->offset != obj->gtt_offset) {
entry->offset = obj->gtt_offset;
// LEAVE();
*need_reloc = true;
}
 
if (entry->flags & EXEC_OBJECT_WRITE) {
obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
}
 
if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
!obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(obj, obj->cache_level);
 
return 0;
}
 
437,7 → 509,8
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
struct list_head *objects,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
467,7 → 540,7
else
list_move_tail(&obj->exec_list, &ordered_objects);
 
obj->base.pending_read_domains = 0;
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
507,7 → 580,7
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
ret = i915_gem_execbuffer_reserve_object(obj, ring);
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
517,7 → 590,7
if (obj->gtt_space)
continue;
 
ret = i915_gem_execbuffer_reserve_object(obj, ring);
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
540,21 → 613,22
 
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct list_head *objects,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
int count)
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
bool need_relocs;
int *reloc_offset;
int i, total, ret;
int count = args->buffer_count;
 
/* We may process another execbuffer during the unlock... */
while (!list_empty(objects)) {
obj = list_first_entry(objects,
while (!list_empty(&eb->objects)) {
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
622,34 → 696,16
 
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
 
if(exec[i].handle == -2)
{
obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
ret = eb_lookup_objects(eb, exec, args, file);
if (ret)
goto err;
}
 
list_add_tail(&obj->exec_list, objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
 
ret = i915_gem_execbuffer_reserve(ring, file, objects);
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
 
list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
670,44 → 726,11
}
 
static int
i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
{
u32 plane, flip_mask;
int ret;
 
/* Check for any pending flips. As we only maintain a flip queue depth
* of 1, we can simply insert a WAIT for the next display flip prior
* to executing the batch and avoid stalling the CPU.
*/
 
for (plane = 0; flips >> plane; plane++) {
if (((flips >> plane) & 1) == 0)
continue;
 
if (plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
 
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
}
 
return 0;
}
 
static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
struct list_head *objects)
{
struct drm_i915_gem_object *obj;
uint32_t flush_domains = 0;
uint32_t flips = 0;
int ret;
 
list_for_each_entry(obj, objects, exec_list) {
718,18 → 741,9
if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
i915_gem_clflush_object(obj);
 
if (obj->base.pending_write_domain)
flips |= atomic_read(&obj->pending_flip);
 
flush_domains |= obj->base.write_domain;
}
 
if (flips) {
ret = i915_gem_execbuffer_wait_for_flips(ring, flips);
if (ret)
return ret;
}
 
if (flush_domains & I915_GEM_DOMAIN_CPU)
i915_gem_chipset_flush(ring->dev);
 
745,6 → 759,9
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
return false;
 
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
 
753,21 → 770,26
int count)
{
int i;
int relocs_total = 0;
int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
 
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
 
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
return -EINVAL;
 
/* First check for malicious input causing overflow in
* the worst case where we need to allocate the entire
* relocation tree as a single array.
*/
if (exec[i].relocation_count > relocs_max - relocs_total)
return -EINVAL;
relocs_total += exec[i].relocation_count;
 
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
// if (!access_ok(VERIFY_READ, ptr, length))
// return -EFAULT;
 
/* we may also need to update the presumed offsets */
// if (!access_ok(VERIFY_WRITE, ptr, length))
// return -EFAULT;
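The accumulation above is what lets the slow path later allocate every relocation entry as one array without integer overflow: each per-buffer count is checked against what is still left under INT_MAX / sizeof(entry) before it is added. A standalone sketch of the same pattern (the entry size is hard-coded here purely for illustration):

#include <limits.h>
#include <stdbool.h>

#define RELOC_ENTRY_SIZE 32   /* sizeof(struct drm_i915_gem_relocation_entry), assumed */

bool reloc_counts_ok(const unsigned int *counts, int nbuffers)
{
    int relocs_total = 0;
    int relocs_max = INT_MAX / RELOC_ENTRY_SIZE;
    int i;

    for (i = 0; i < nbuffers; i++) {
        /* Reject before adding, so relocs_total itself can never overflow. */
        if (counts[i] > (unsigned int)(relocs_max - relocs_total))
            return false;
        relocs_total += counts[i];
    }
    return true;
}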
789,8 → 811,10
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
 
obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
 
i915_gem_object_move_to_active(obj, ring);
849,7 → 873,6
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
856,12 → 879,12
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask;
u32 flags;
u32 mask, flags;
int ret, mode, i;
bool need_relocs;
 
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
if (!i915_gem_check_execbuffer(args))
{
FAIL();
return -EINVAL;
}
875,8 → 898,6
 
flags = 0;
if (args->flags & I915_EXEC_SECURE) {
// if (!file->is_master || !capable(CAP_SYS_ADMIN))
// return -EPERM;
 
flags |= I915_DISPATCH_SECURE;
}
989,7 → 1010,7
goto pre_mutex_err;
}
 
eb = eb_create(args->buffer_count);
eb = eb_create(args);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
997,60 → 1018,28
}
 
/* Look up object handles */
INIT_LIST_HEAD(&objects);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
 
if(exec[i].handle == -2)
{
obj = get_fb_obj();
drm_gem_object_reference(&obj->base);
}
else
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
 
// printf("%s object %p handle %d\n", __FUNCTION__, obj, exec[i].handle);
 
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
ret = eb_lookup_objects(eb, exec, args, file);
if (ret)
goto err;
}
 
if (!list_empty(&obj->exec_list)) {
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
}
 
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
 
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev,
batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object,
exec_list);
 
/* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
 
/* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
if (need_relocs)
ret = i915_gem_execbuffer_relocate(dev, eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
&objects, eb,
exec,
args->buffer_count);
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
1072,7 → 1061,7
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
 
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
if (ret)
goto err;
 
1104,18 → 1093,7
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
exec_len = args->batch_len;
if (cliprects) {
// for (i = 0; i < args->num_cliprects; i++) {
// ret = i915_emit_box(dev, &cliprects[i],
// args->DR1, args->DR4);
// if (ret)
// goto err;
 
// ret = ring->dispatch_execbuffer(ring,
// exec_start, exec_len,
// flags);
// if (ret)
// goto err;
// }
} else {
ret = ring->dispatch_execbuffer(ring,
exec_start, exec_len,
1126,30 → 1104,21
 
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
 
i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
 
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object *obj;
 
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
 
mutex_unlock(&dev->struct_mutex);
 
pre_mutex_err:
kfree(cliprects);
 
 
return ret;
}
 
 
 
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
struct drm_file *file)
1167,11 → 1136,8
return -EINVAL;
}
 
exec2_list = malloc(sizeof(*exec2_list)*args->buffer_count);
 
// if (exec2_list == NULL)
// exec2_list = drm_malloc_ab(sizeof(*exec2_list),
// args->buffer_count);
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL) {
DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
args->buffer_count);
/drivers/video/drm/i915/i915_gem_gtt.c
24,6 → 24,12
 
#define iowrite32(v, addr) writel((v), (addr))
 
#define AGP_NORMAL_MEMORY 0
 
#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
30,10 → 36,6
#include "i915_trace.h"
#include "intel_drv.h"
 
#define AGP_USER_TYPES (1 << 16)
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
 
typedef uint32_t gtt_pte_t;
 
/* PPGTT stuff */
50,7 → 52,7
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
 
static inline gtt_pte_t pte_encode(struct drm_device *dev,
static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
83,7 → 85,7
}
 
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
93,15 → 95,16
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
 
scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
scratch_pte = gen6_pte_encode(ppgtt->dev,
ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
 
pt_vaddr = AllocKernelSpace(4096);
 
if(pt_vaddr != NULL)
{
while (num_entries)
{
if(pt_vaddr == NULL)
return;
 
while (num_entries) {
last_pte = first_pte + num_entries;
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
114,15 → 117,81
num_entries -= last_pte - first_pte;
first_pte = 0;
act_pd++;
};
 
FreeKernelSpace(pt_vaddr);
}
 
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
dma_addr_t page_addr;
struct scatterlist *sg;
 
/* init sg walking */
sg = pages->sgl;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
 
pt_vaddr = AllocKernelSpace(4096);
 
if(pt_vaddr == NULL)
return;
 
while (i < pages->nents) {
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pd]), 3);
 
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
cache_level);
 
/* grab the next page */
if (++m == segment_len) {
if (++i == pages->nents)
break;
 
sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
}
 
first_pte = 0;
act_pd++;
}
FreeKernelSpace(pt_vaddr);
};
}
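Both PPGTT paths above end up writing gen6-style PTEs: a 4 KB-aligned bus address, a cache-level field and a valid bit, which is what gen6_pte_encode() builds from the GEN6_PTE_* macros at the top of this file. A standalone sketch with the bit positions spelled out (values assumed to match those macros; the folding of address bits above 4 GB into spare low PTE bits is omitted):

#include <stdint.h>

typedef uint32_t gtt_pte_t;

#define SKETCH_PTE_VALID       (1 << 0)
#define SKETCH_PTE_UNCACHED    (1 << 1)   /* GEN6_PTE_UNCACHED, assumed */
#define SKETCH_PTE_CACHE_LLC   (2 << 1)   /* GEN6_PTE_CACHE_LLC, assumed */

/* Illustrative gen6 encoding: page address plus cache/valid bits.
 * The real encoder also folds address bits 32+ into spare low bits. */
gtt_pte_t gen6_pte_encode_sketch(uint64_t addr, int cached)
{
    gtt_pte_t pte = SKETCH_PTE_VALID;

    pte |= (gtt_pte_t)(addr & 0xfffff000);
    pte |= cached ? SKETCH_PTE_CACHE_LLC : SKETCH_PTE_UNCACHED;
    return pte;
}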
 
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
int i;
 
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
 
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
kfree(ppgtt);
}
 
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
unsigned first_pd_entry_in_global_pt;
int i;
int ret = -ENOMEM;
130,17 → 199,17
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
first_pd_entry_in_global_pt =
gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
 
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ret;
 
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->clear_range = gen6_ppgtt_clear_range;
ppgtt->insert_entries = gen6_ppgtt_insert_entries;
ppgtt->cleanup = gen6_ppgtt_cleanup;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
goto err_ppgtt;
return -ENOMEM;
 
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
148,10 → 217,7
goto err_pt_alloc;
}
 
/*
if (dev_priv->mm.gtt->needs_dmar) {
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
*ppgtt->num_pd_entries,
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
159,126 → 225,75
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
 
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
PCI_DMA_BIDIRECTIONAL);
 
if (pci_dma_mapping_error(dev->pdev,
pt_addr)) {
ret = -EIO;
goto err_pd_pin;
 
}
ppgtt->pt_dma_addr[i] = pt_addr;
}
}
*/
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
 
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
 
ppgtt->clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
 
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
 
dev_priv->mm.aliasing_ppgtt = ppgtt;
 
return 0;
 
err_pd_pin:
// if (ppgtt->pt_dma_addr) {
// for (i--; i >= 0; i--)
// pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
// 4096, PCI_DMA_BIDIRECTIONAL);
// }
if (ppgtt->pt_dma_addr) {
for (i--; i >= 0; i--)
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
err_pt_alloc:
// kfree(ppgtt->pt_dma_addr);
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
if (ppgtt->pt_pages[i])
FreePage((addr_t)(ppgtt->pt_pages[i]));
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
err_ppgtt:
kfree(ppgtt);
 
return ret;
}
 
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
struct i915_hw_ppgtt *ppgtt;
int ret;
 
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return;
return -ENOMEM;
 
// if (ppgtt->pt_dma_addr) {
// for (i = 0; i < ppgtt->num_pd_entries; i++)
// pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
// 4096, PCI_DMA_BIDIRECTIONAL);
// }
ppgtt->dev = dev;
 
// kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
FreePage((addr_t)(ppgtt->pt_pages[i]));
kfree(ppgtt->pt_pages);
ret = gen6_ppgtt_init(ppgtt);
if (ret)
kfree(ppgtt);
else
dev_priv->mm.aliasing_ppgtt = ppgtt;
 
return ret;
}
 
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j, m, segment_len;
dma_addr_t page_addr;
struct scatterlist *sg;
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
 
/* init sg walking */
sg = pages->sgl;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
 
pt_vaddr = AllocKernelSpace(4096);
if( pt_vaddr == NULL)
if (!ppgtt)
return;
 
while (i < pages->nents) {
MapPage(pt_vaddr,(addr_t)ppgtt->pt_pages[act_pd], 3);
 
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
cache_level);
 
/* grab the next page */
if (++m == segment_len) {
if (++i == pages->nents)
break;
 
sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
ppgtt->cleanup(ppgtt);
}
}
 
 
first_pte = 0;
act_pd++;
}
FreeKernelSpace(pt_vaddr);
}
 
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
i915_ppgtt_insert_sg_entries(ppgtt,
obj->pages,
ppgtt->insert_entries(ppgtt, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
cache_level);
}
286,7 → 301,7
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
i915_ppgtt_clear_range(ppgtt,
ppgtt->clear_range(ppgtt,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
297,7 → 312,7
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
gtt_pte_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
 
305,15 → 320,11
return;
 
 
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
 
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
 
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
 
353,11 → 364,27
}
}
 
extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
return true;
#endif
return false;
}
 
static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
 
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
if (unlikely(dev_priv->gtt.do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
371,39 → 398,10
 
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
if (unlikely(dev_priv->mm.gtt->do_idle_maps))
if (unlikely(dev_priv->gtt.do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
 
 
static void i915_ggtt_clear_range(struct drm_device *dev,
unsigned first_entry,
unsigned num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
int i;
 
if (INTEL_INFO(dev)->gen < 6) {
intel_gtt_clear_range(first_entry, num_entries);
return;
}
 
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
 
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
 
 
#if 0
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
410,8 → 408,8
struct drm_i915_gem_object *obj;
 
/* First fill our portion of the GTT with scratch pages */
i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
dev_priv->gtt.total / PAGE_SIZE);
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
420,30 → 418,17
 
i915_gem_chipset_flush(dev);
}
#endif
 
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
struct scatterlist *sg, *s;
unsigned int nents ;
int i;
 
if (obj->has_dma_mapping)
return 0;
 
sg = obj->pages->sgl;
nents = obj->pages->nents;
if (!dma_map_sg(&obj->base.dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL))
return -ENOSPC;
 
 
WARN_ON(nents == 0 || sg[0].length == 0);
 
for_each_sg(sg, s, nents, i) {
BUG_ON(!sg_page(s));
s->dma_address = sg_phys(s);
}
 
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
 
return 0;
}
 
453,16 → 438,15
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
static void gen6_ggtt_insert_entries(struct drm_device *dev,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
gtt_pte_t __iomem *gtt_entries =
(gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
471,14 → 455,12
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
iowrite32(gen6_pte_encode(dev, addr, level),
&gtt_entries[i]);
i++;
}
}
 
BUG_ON(i > max_entries);
BUG_ON(i != obj->base.size / PAGE_SIZE);
 
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR that they are potentially
486,7 → 468,8
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
WARN_ON(readl(&gtt_entries[i-1])
!= gen6_pte_encode(dev, addr, level));
 
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
496,26 → 479,68
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
 
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
static void gen6_ggtt_clear_range(struct drm_device *dev,
unsigned int first_entry,
unsigned int num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
 
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
 
scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
 
 
static void i915_ggtt_insert_entries(struct drm_device *dev,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
if (INTEL_INFO(dev)->gen < 6) {
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
flags);
} else {
gen6_ggtt_bind_object(obj, cache_level);
 
intel_gtt_insert_sg_entries(st, pg_start, flags);
 
}
 
static void i915_ggtt_clear_range(struct drm_device *dev,
unsigned int first_entry,
unsigned int num_entries)
{
intel_gtt_clear_range(first_entry, num_entries);
}
 
 
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
cache_level);
 
obj->has_global_gtt_mapping = 1;
}
 
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
i915_ggtt_clear_range(obj->base.dev,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->gtt.gtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
 
530,10 → 555,10
 
interruptible = do_idling(dev_priv);
 
// if (!obj->has_dma_mapping)
// dma_unmap_sg(&dev->pdev->dev,
// obj->pages->sgl, obj->pages->nents,
// PCI_DMA_BIDIRECTIONAL);
if (!obj->has_dma_mapping)
dma_unmap_sg(&dev->pdev->dev,
obj->pages->sgl, obj->pages->nents,
PCI_DMA_BIDIRECTIONAL);
 
undo_idling(dev_priv, interruptible);
}
554,29 → 579,104
*end -= 4096;
}
}
 
void i915_gem_init_global_gtt(struct drm_device *dev,
void i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
{
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently prefetches
* past the end of the object, and we've seen multiple hangs with the
* GPU head pointer stuck in a batchbuffer bound at the last page of the
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
 
/* Substract the guard page ... */
BUG_ON(mappable_end > end);
 
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;
 
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
dev_priv->mm.gtt_end = end;
dev_priv->mm.gtt_total = end - start;
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
/* Mark any preallocated objects as occupied */
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
obj->gtt_offset, obj->base.size);
 
/* ... but ensure that we clear the entire range. */
i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
obj->gtt_offset,
obj->base.size,
false);
obj->has_global_gtt_mapping = 1;
}
 
dev_priv->gtt.start = start;
dev_priv->gtt.total = end - start;
 
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
(hole_end-hole_start) / PAGE_SIZE);
}
 
/* And finally clear the reserved guard page */
dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}
 
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
if (i915_enable_ppgtt >= 0)
return i915_enable_ppgtt;
 
#ifdef CONFIG_INTEL_IOMMU
/* Disable ppgtt on SNB if VT-d is on. */
if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
return false;
#endif
 
return true;
}
 
void i915_gem_init_global_gtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
 
gtt_size = dev_priv->gtt.total;
mappable_size = dev_priv->gtt.mappable_end;
 
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
int ret;
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
gtt_size -= LFB_SIZE;
 
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
 
ret = i915_gem_init_aliasing_ppgtt(dev);
if (!ret)
return;
 
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
drm_mm_takedown(&dev_priv->mm.gtt_space);
gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
}
i915_gem_setup_global_gtt(dev, LFB_SIZE, mappable_size, gtt_size);
}
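With aliasing PPGTT enabled, the path above splits the global GTT into: LFB_SIZE reserved at the bottom for the boot framebuffer, the PPGTT page-directory entries stolen from the top, and the remainder handed to GEM (minus the usual guard page). A short arithmetic sketch of that split, assuming the usual 4 KB page size and 512 PD entries:

#include <stdio.h>

#define PAGE_SIZE             4096UL
#define I915_PPGTT_PD_ENTRIES 512          /* assumed, as in the driver */
#define LFB_SIZE              0xC00000UL   /* 12 MiB, from this file */

int main(void)
{
    unsigned long gtt_total = 2UL << 30;   /* e.g. a 2 GiB global GTT */
    unsigned long end = gtt_total
                      - I915_PPGTT_PD_ENTRIES * PAGE_SIZE /* PDEs stolen from the top */
                      - LFB_SIZE;                         /* as in this init path */

    printf("GEM manages [0x%lx, 0x%lx), ~%lu MiB before the guard page\n",
           LFB_SIZE, end, (end - LFB_SIZE) >> 20);
    return 0;
}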
 
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
586,6 → 686,8
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
get_page(page);
set_pages_uc(page, 1);
 
#ifdef CONFIG_INTEL_IOMMU
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
595,12 → 697,21
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->mm.gtt->scratch_page = page;
dev_priv->mm.gtt->scratch_page_dma = dma_addr;
dev_priv->gtt.scratch_page = page;
dev_priv->gtt.scratch_page_dma = dma_addr;
 
return 0;
}
 
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
set_pages_wb(dev_priv->gtt.scratch_page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(dev_priv->gtt.scratch_page);
__free_page(dev_priv->gtt.scratch_page);
}
 
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
609,7 → 720,7
return snb_gmch_ctl << 20;
}
 
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
616,7 → 727,7
return snb_gmch_ctl << 25; /* 32 MB units */
}
 
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
625,92 → 736,131
return stolen_decoder[snb_gmch_ctl] << 20;
}
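The two helpers above decode the GMS field of the GMCH control word in different ways: gen6 reports stolen memory in 32 MB units (hence the shift by 25), while gen7 goes through the lookup table. A tiny sketch of the gen6 case with a worked value (the GMS mask/shift register constants are not reproduced here):

#include <assert.h>
#include <stddef.h>

/* Illustrative: gms_field is the already-extracted GMS bits of SNB_GMCH_CTRL. */
static size_t gen6_stolen_bytes(unsigned int gms_field)
{
    return (size_t)gms_field << 25;        /* 32 MB units */
}

int main(void)
{
    assert(gen6_stolen_bytes(2) == (size_t)64 << 20);  /* GMS = 2 -> 64 MB stolen */
    return 0;
}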
 
int i915_gem_gtt_init(struct drm_device *dev)
static int gen6_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
 
/* On modern platforms we need not worry ourself with the legacy
* hostbridge query stuff. Skip it entirely
*mappable_base = pci_resource_start(dev->pdev, 2);
*mappable_end = pci_resource_len(dev->pdev, 2);
 
/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
if (INTEL_INFO(dev)->gen < 6) {
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
DRM_ERROR("Unknown GMADR size (%lx)\n",
dev_priv->gtt.mappable_end);
return -ENXIO;
}
 
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
return -ENODEV;
}
return 0;
}
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
 
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
if (!dev_priv->mm.gtt)
return -ENOMEM;
if (IS_GEN7(dev))
*stolen = gen7_get_stolen_size(snb_gmch_ctl);
else
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
 
*gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
 
#ifdef CONFIG_INTEL_IOMMU
dev_priv->mm.gtt->needs_dmar = 1;
#endif
 
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
 
/* i9xx_setup */
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
dev_priv->mm.gtt->gtt_total_entries =
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
if (INTEL_INFO(dev)->gen < 7)
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
else
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
 
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
/* 64/512MB is the current min/max we actually know of, but this is just a
* coarse sanity check.
*/
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
DRM_ERROR("Unknown GMADR entries (%d)\n",
dev_priv->mm.gtt->gtt_mappable_entries);
ret = -ENXIO;
goto err_out;
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
 
ret = setup_scratch_page(dev);
if (ret) {
if (ret)
DRM_ERROR("Scratch setup failed\n");
goto err_out;
 
dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
 
return ret;
}
 
dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr,
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
if (!dev_priv->mm.gtt->gtt) {
DRM_ERROR("Failed to map the gtt page table\n");
ret = -ENOMEM;
goto err_out;
static void gen6_gmch_remove(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
iounmap(dev_priv->gtt.gsm);
teardown_scratch_page(dev_priv->dev);
}
 
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
static int i915_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen,
phys_addr_t *mappable_base,
unsigned long *mappable_end)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
 
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
}
 
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
 
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
 
return 0;
}
 
err_out:
kfree(dev_priv->mm.gtt);
static void i915_gmch_remove(struct drm_device *dev)
{
// intel_gmch_remove();
}
 
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_gtt *gtt = &dev_priv->gtt;
unsigned long gtt_size;
int ret;
 
if (INTEL_INFO(dev)->gen <= 5) {
dev_priv->gtt.gtt_probe = i915_gmch_probe;
dev_priv->gtt.gtt_remove = i915_gmch_remove;
} else {
dev_priv->gtt.gtt_probe = gen6_gmch_probe;
dev_priv->gtt.gtt_remove = gen6_gmch_remove;
}
 
ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
&dev_priv->gtt.stolen_size,
&gtt->mappable_base,
&gtt->mappable_end);
if (ret)
return ret;
 
gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
 
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
dev_priv->gtt.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
dev_priv->gtt.mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
dev_priv->gtt.stolen_size >> 20);
 
return 0;
}
 
 
struct scatterlist *sg_next(struct scatterlist *sg)
{
if (sg_is_last(sg))
/drivers/video/drm/i915/i915_gem_stolen.c
42,85 → 42,73
* for is a boon.
*/
 
#define PTE_ADDRESS_MASK 0xfffff000
#define PTE_ADDRESS_MASK_HIGH 0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED (3 << 1)
#define PTE_MAPPING_TYPE_MASK (3 << 1)
#define PTE_VALID (1 << 0)
 
/**
* i915_stolen_to_phys - take an offset into stolen memory and turn it into
* a physical one
* @dev: drm device
* @offset: address to translate
*
* Some chip functions require allocations from stolen space and need the
* physical address of the memory in question.
*/
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct pci_dev *pdev = dev_priv->bridge_dev;
u32 base;
 
#if 0
/* On the machines I have tested the Graphics Base of Stolen Memory
* is unreliable, so compute the base by subtracting the stolen memory
* from the Top of Low Usable DRAM which is where the BIOS places
* the graphics stolen memory.
* is unreliable, so on those compute the base by subtracting the
* stolen memory from the Top of Low Usable DRAM which is where the
* BIOS places the graphics stolen memory.
*
* On gen2, the layout is slightly different with the Graphics Segment
* immediately following Top of Memory (or Top of Usable DRAM). Note
* it appears that TOUD is only reported by 865g, so we just use the
* top of memory as determined by the e820 probe.
*
* XXX gen2 requires an unavailable symbol and 945gm fails with
* its value of TOLUD.
*/
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
/* top 32bits are reserved = 0 */
base = 0;
if (INTEL_INFO(dev)->gen >= 6) {
/* Read Base Data of Stolen Memory Register (BDSM) directly.
* Note that there is also a MCHBAR mirror at 0x1080c0 or
* we could use device 2:0x5c instead.
*/
pci_read_config_dword(pdev, 0xB0, &base);
base &= ~4095; /* lower bits used for locking register */
} else if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
/* Read Graphics Base of Stolen Memory directly */
pci_read_config_dword(pdev, 0xA4, &base);
} else {
/* XXX presume 8xx is the same as i915 */
pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
}
#else
if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
u16 val;
pci_read_config_word(pdev, 0xb0, &val);
base = val >> 4 << 20;
} else {
#if 0
} else if (IS_GEN3(dev)) {
u8 val;
/* Stolen is immediately below Top of Low Usable DRAM */
pci_read_config_byte(pdev, 0x9c, &val);
base = val >> 3 << 27;
}
base -= dev_priv->mm.gtt->stolen_size;
} else {
/* Stolen is immediately above Top of Memory */
base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
 
return base + offset;
}
 
static void i915_warn_stolen(struct drm_device *dev)
{
DRM_INFO("not enough stolen space for compressed buffer, disabling\n");
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
return base;
}
 
static void i915_setup_compression(struct drm_device *dev, int size)
static int i915_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
unsigned long cfb_base;
unsigned long ll_base = 0;
 
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
 
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
/* Try to over-allocate to reduce reallocations and fragmentation */
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
size <<= 1, 4096, 0);
if (!compressed_fb)
compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen,
size >>= 1, 4096, 0);
if (compressed_fb)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb)
goto err;
 
cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
if (!cfb_base)
goto err_fb;
 
if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
4096, 4096, 0);
if (compressed_llb)
129,73 → 117,206
if (!compressed_llb)
goto err_fb;
 
ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
if (!ll_base)
goto err_llb;
dev_priv->compressed_llb = compressed_llb;
 
I915_WRITE(FBC_CFB_BASE,
dev_priv->mm.stolen_base + compressed_fb->start);
I915_WRITE(FBC_LL_BASE,
dev_priv->mm.stolen_base + compressed_llb->start);
}
 
dev_priv->compressed_fb = compressed_fb;
dev_priv->cfb_size = size;
 
dev_priv->compressed_fb = compressed_fb;
if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
} else {
I915_WRITE(FBC_CFB_BASE, cfb_base);
I915_WRITE(FBC_LL_BASE, ll_base);
dev_priv->compressed_llb = compressed_llb;
}
DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
size);
 
DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
cfb_base, ll_base, size >> 20);
return;
return 0;
 
err_llb:
drm_mm_put_block(compressed_llb);
err_fb:
drm_mm_put_block(compressed_fb);
err:
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
return -ENOSPC;
}
 
static void i915_cleanup_compression(struct drm_device *dev)
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->mm.stolen_base == 0)
return -ENODEV;
 
if (size < dev_priv->cfb_size)
return 0;
 
/* Release any current block */
i915_gem_stolen_cleanup_compression(dev);
 
return i915_setup_compression(dev, size);
}
 
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->cfb_size == 0)
return;
 
if (dev_priv->compressed_fb)
drm_mm_put_block(dev_priv->compressed_fb);
 
if (dev_priv->compressed_llb)
drm_mm_put_block(dev_priv->compressed_llb);
 
dev_priv->cfb_size = 0;
}
 
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
struct drm_i915_private *dev_priv = dev->dev_private;
 
i915_gem_stolen_cleanup_compression(dev);
drm_mm_takedown(&dev_priv->mm.stolen);
}
 
int i915_gem_init_stolen(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long prealloc_size = dev_priv->mm.gtt->stolen_size;
 
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
 
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
 
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
 
/* Try to set up FBC with a reasonable compressed buffer size */
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
return 0;
}
 
/* Leave 1M for line length buffer & misc. */
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
u32 offset, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st;
struct scatterlist *sg;
 
/* Try to get a 32M buffer... */
if (prealloc_size > (36*1024*1024))
cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
BUG_ON(offset > dev_priv->gtt.stolen_size - size);
 
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
* dma mapping in a single scatterlist.
*/
 
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return NULL;
 
if (sg_alloc_table(st, 1, GFP_KERNEL)) {
kfree(st);
return NULL;
}
 
return 0;
sg = st->sgl;
sg->offset = offset;
sg->length = size;
 
sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
sg_dma_len(sg) = size;
 
return st;
}
 
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
BUG();
return -EINVAL;
}
 
static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
/* Should only be called during free */
sg_free_table(obj->pages);
kfree(obj->pages);
}
 
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
.get_pages = i915_gem_object_get_pages_stolen,
.put_pages = i915_gem_object_put_pages_stolen,
};
 
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
struct drm_mm_node *stolen)
{
struct drm_i915_gem_object *obj;
 
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
return NULL;
 
if (drm_gem_private_object_init(dev, &obj->base, stolen->size))
goto cleanup;
 
i915_gem_object_init(obj, &i915_gem_object_stolen_ops);
 
obj->pages = i915_pages_create_for_stolen(dev,
stolen->start, stolen->size);
if (obj->pages == NULL)
goto cleanup;
 
obj->has_dma_mapping = true;
obj->pages_pin_count = 1;
obj->stolen = stolen;
 
obj->base.write_domain = I915_GEM_DOMAIN_GTT;
obj->base.read_domains = I915_GEM_DOMAIN_GTT;
obj->cache_level = I915_CACHE_NONE;
 
return obj;
 
cleanup:
i915_gem_object_free(obj);
return NULL;
}
 
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
 
if (dev_priv->mm.stolen_base == 0)
return NULL;
 
DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
if (size == 0)
return NULL;
 
stolen = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
if (stolen)
stolen = drm_mm_get_block(stolen, size, 4096);
if (stolen == NULL)
return NULL;
 
obj = _i915_gem_object_create_stolen(dev, stolen);
if (obj)
return obj;
 
drm_mm_put_block(stolen);
return NULL;
}
 
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
if (obj->stolen) {
drm_mm_put_block(obj->stolen);
obj->stolen = NULL;
}
}
/drivers/video/drm/i915/i915_gem_tiling.c
291,18 → 291,7
return false;
}
 
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
if (INTEL_INFO(obj->base.dev)->gen == 3)
size = 1024*1024;
else
size = 512*1024;
 
while (size < obj->base.size)
size <<= 1;
 
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (obj->gtt_space->size != size)
return false;
 
387,15 → 376,15
 
obj->map_and_fenceable =
obj->gtt_space == NULL ||
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
 
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
i915_gem_get_unfenced_gtt_alignment(dev,
obj->base.size,
args->tiling_mode);
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}
415,6 → 404,18
/* we have to maintain this existing ABI... */
args->stride = obj->stride;
args->tiling_mode = obj->tiling_mode;
 
/* Try to preallocate memory required to save swizzling on put-pages */
if (i915_gem_object_needs_bit17_swizzle(obj)) {
if (obj->bit_17 == NULL) {
obj->bit_17 = kmalloc(BITS_TO_LONGS(obj->base.size >> PAGE_SHIFT) *
sizeof(long), GFP_KERNEL);
}
} else {
kfree(obj->bit_17);
obj->bit_17 = NULL;
}
 
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
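A quick sanity check of the bit_17 allocation size above (standalone sketch; the 4 MiB object size is an assumption): one bit is kept per page and packed into longs, so a 4 MiB object needs 1024 bits, i.e. 16 longs (128 bytes) on a 64-bit build.

/* Minimal stand-ins for the kernel helpers referenced above; illustration only. */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT 12
#define BITS_PER_LONG (8 * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	size_t obj_size = 4UL << 20;              /* assumed 4 MiB object */
	size_t npages   = obj_size >> PAGE_SHIFT; /* 1024 pages */
	size_t bytes    = BITS_TO_LONGS(npages) * sizeof(long);

	printf("bit_17 bitmap: %zu longs, %zu bytes\n",
	       BITS_TO_LONGS(npages), bytes);
	return 0;
}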
 
/drivers/video/drm/i915/i915_irq.c
45,33 → 45,8
 
#define MAX_NOPID ((u32)~0)
 
/**
* Interrupts that are always left unmasked.
*
* Since pipe events are edge-triggered from the PIPESTAT register to IIR,
* we leave them always unmasked in IMR and then control enabling them through
* PIPESTAT alone.
*/
#define I915_INTERRUPT_ENABLE_FIX \
(I915_ASLE_INTERRUPT | \
I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | \
I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | \
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 
/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
 
#define I915_PIPE_VBLANK_STATUS (PIPE_START_VBLANK_INTERRUPT_STATUS |\
PIPE_VBLANK_INTERRUPT_STATUS)
 
#define I915_PIPE_VBLANK_ENABLE (PIPE_START_VBLANK_INTERRUPT_ENABLE |\
PIPE_VBLANK_INTERRUPT_ENABLE)
 
#define DRM_I915_VBLANK_PIPE_ALL (DRM_I915_VBLANK_PIPE_A | \
DRM_I915_VBLANK_PIPE_B)
 
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
215,7 → 190,34
return I915_READ(reg);
}
 
/*
* Handle hotplug events outside the interrupt handler proper.
*/
static void i915_hotplug_work_func(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
hotplug_work);
struct drm_device *dev = dev_priv->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
 
/* HPD irq before everything is fully set up. */
if (!dev_priv->enable_hotplug_processing)
return;
 
mutex_lock(&mode_config->mutex);
DRM_DEBUG_KMS("running encoder hotplug functions\n");
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
 
mutex_unlock(&mode_config->mutex);
 
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
}
 
static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
402,6 → 404,20
// queue_work(dev_priv->wq, &dev_priv->rps.work);
}
 
static void gmbus_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
wake_up_all(&dev_priv->gmbus_wait_queue);
}
 
static void dp_aux_irq_handler(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
wake_up_all(&dev_priv->gmbus_wait_queue);
}
 
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
411,7 → 427,6
unsigned long irqflags;
int pipe;
u32 pipe_stats[I915_MAX_PIPES];
bool blc_event;
 
atomic_inc(&dev_priv->irq_received);
 
462,19 → 477,19
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
// if (hotplug_status & dev_priv->hotplug_supported_mask)
// queue_work(dev_priv->wq,
// &dev_priv->hotplug_work);
if (hotplug_status & dev_priv->hotplug_supported_mask)
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
}
 
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
gmbus_irq_handler(dev);
 
if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
// if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
// gen6_queue_rps_work(dev_priv, pm_iir);
 
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
490,7 → 505,8
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
printf("%s\n", __FUNCTION__);
if (pch_iir & SDE_HOTPLUG_MASK)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
497,8 → 513,11
(pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
 
if (pch_iir & SDE_AUX_MASK)
dp_aux_irq_handler(dev);
 
if (pch_iir & SDE_GMBUS)
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
gmbus_irq_handler(dev);
 
if (pch_iir & SDE_AUDIO_HDCP_MASK)
DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
532,6 → 551,9
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
if (pch_iir & SDE_HOTPLUG_MASK_CPT)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
 
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
538,10 → 560,10
SDE_AUDIO_POWER_SHIFT_CPT);
 
if (pch_iir & SDE_AUX_MASK_CPT)
DRM_DEBUG_DRIVER("AUX channel interrupt\n");
dp_aux_irq_handler(dev);
 
if (pch_iir & SDE_GMBUS_CPT)
DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
gmbus_irq_handler(dev);
 
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
560,7 → 582,7
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 de_iir, gt_iir, de_ier, pm_iir;
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
irqreturn_t ret = IRQ_NONE;
int i;
 
570,6 → 592,15
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
 
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
 
gt_iir = I915_READ(GTIIR);
if (gt_iir) {
snb_gt_irq_handler(dev, dev_priv, gt_iir);
579,6 → 610,8
 
de_iir = I915_READ(DEIIR);
if (de_iir) {
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev);
#if 0
if (de_iir & DE_GSE_IVB)
intel_opregion_gse_intr(dev);
596,8 → 629,6
if (de_iir & DE_PCH_EVENT_IVB) {
u32 pch_iir = I915_READ(SDEIIR);
 
// if (pch_iir & SDE_HOTPLUG_MASK_CPT)
// queue_work(dev_priv->wq, &dev_priv->hotplug_work);
cpt_irq_handler(dev, pch_iir);
 
/* clear PCH hotplug event before clear CPU irq */
618,6 → 649,8
 
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
 
return ret;
}
637,7 → 670,7
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
 
atomic_inc(&dev_priv->irq_received);
 
646,13 → 679,20
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
POSTING_READ(DEIER);
 
/* Disable south interrupts. We'll only write to SDEIIR once, so further
* interrupts will be stored on its back queue, and then we'll be
* able to process them after we restore SDEIER (as soon as we restore
* it, we'll get an interrupt if SDEIIR still has something to process
* due to its back queue). */
sde_ier = I915_READ(SDEIER);
I915_WRITE(SDEIER, 0);
POSTING_READ(SDEIER);
 
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
pm_iir = I915_READ(GEN6_PMIIR);
 
if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
(!IS_GEN6(dev) || pm_iir == 0))
if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
goto done;
 
ret = IRQ_HANDLED;
661,6 → 701,10
ilk_gt_irq_handler(dev, dev_priv, gt_iir);
else
snb_gt_irq_handler(dev, dev_priv, gt_iir);
 
if (de_iir & DE_AUX_CHANNEL_A)
dp_aux_irq_handler(dev);
 
#if 0
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
684,12 → 728,15
 
/* check event from PCH */
if (de_iir & DE_PCH_EVENT) {
// if (pch_iir & hotplug_mask)
// queue_work(dev_priv->wq, &dev_priv->hotplug_work);
u32 pch_iir = I915_READ(SDEIIR);
 
if (HAS_PCH_CPT(dev))
cpt_irq_handler(dev, pch_iir);
else
ibx_irq_handler(dev, pch_iir);
 
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
}
#if 0
if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
698,8 → 745,6
if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
#endif
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(DEIIR, de_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
707,6 → 752,8
done:
I915_WRITE(DEIER, de_ier);
POSTING_READ(DEIER);
I915_WRITE(SDEIER, sde_ier);
POSTING_READ(SDEIER);
 
return ret;
}
733,7 → 780,7
instdone[1] = I915_READ(INSTDONE1);
break;
default:
WARN(1, "Unsupported platform\n");
WARN_ONCE(1, "Unsupported platform\n");
case 7:
instdone[0] = I915_READ(GEN7_INSTDONE_1);
instdone[1] = I915_READ(GEN7_SC_INSTDONE);
771,7 → 818,7
goto unwind;
 
local_irq_save(flags);
if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
if (reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping) {
void __iomem *s;
 
780,10 → 827,18
* captures what the GPU read.
*/
 
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
} else if (src->stolen) {
unsigned long offset;
 
offset = dev_priv->mm.stolen_base;
offset += src->stolen->start;
offset += i << PAGE_SHIFT;
 
memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
} else {
struct page *page;
void *s;
930,6 → 985,8
error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
 
default:
BUG();
}
}
 
943,6 → 1000,18
if (!ring->get_seqno)
return NULL;
 
if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
u32 acthd = I915_READ(ACTHD);
 
if (WARN_ON(ring->id != RCS))
return NULL;
 
obj = ring->private;
if (acthd >= obj->gtt_offset &&
acthd < obj->gtt_offset + obj->base.size)
return i915_error_object_create(dev_priv, obj);
}
 
seqno = ring->get_seqno(ring, false);
list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (obj->ring != ring)
1066,9 → 1135,9
unsigned long flags;
int i, pipe;
 
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
return;
 
1079,7 → 1148,8
return;
}
 
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
DRM_INFO("capturing error event; look for more information in"
"/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index);
 
kref_init(&error->ref);
1162,12 → 1232,12
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
 
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
dev_priv->first_error = error;
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->gpu_error.first_error = error;
error = NULL;
}
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
if (error)
i915_error_state_free(&error->ref);
1179,10 → 1249,10
struct drm_i915_error_state *error;
unsigned long flags;
 
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
dev_priv->first_error = NULL;
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
dev_priv->gpu_error.first_error = NULL;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
 
if (error)
kref_put(&error->ref, i915_error_state_free);
1303,11 → 1373,12
i915_report_and_clear_eir(dev);
 
if (wedged) {
// INIT_COMPLETION(dev_priv->error_completion);
atomic_set(&dev_priv->mm.wedged, 1);
atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter);
 
/*
* Wakeup waiting processes so they don't hang
* Wakeup waiting processes so that the reset work item
* doesn't deadlock trying to grab various locks.
*/
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
1579,7 → 1650,7
* This register is the same on all known PCH chips.
*/
 
static void ironlake_enable_pch_hotplug(struct drm_device *dev)
static void ibx_enable_hotplug(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug;
1592,14 → 1663,36
I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
 
static void ibx_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 mask;
 
if (HAS_PCH_IBX(dev))
mask = SDE_HOTPLUG_MASK |
SDE_GMBUS |
SDE_AUX_MASK;
else
mask = SDE_HOTPLUG_MASK_CPT |
SDE_GMBUS_CPT |
SDE_AUX_MASK_CPT;
 
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, ~mask);
I915_WRITE(SDEIER, mask);
POSTING_READ(SDEIER);
 
ibx_enable_hotplug(dev);
}
 
static int ironlake_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
/* enable kind of interrupts always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A;
u32 render_irqs;
u32 hotplug_mask;
 
dev_priv->irq_mask = ~display_mask;
 
1627,33 → 1720,13
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
 
if (HAS_PCH_CPT(dev)) {
hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
} else {
hotplug_mask = (SDE_CRT_HOTPLUG |
SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG |
SDE_PORTD_HOTPLUG |
SDE_AUX_MASK);
}
ibx_irq_postinstall(dev);
 
dev_priv->pch_irq_mask = ~hotplug_mask;
 
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
 
// ironlake_enable_pch_hotplug(dev);
 
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
I915_WRITE(DEIIR, DE_PCU_EVENT);
I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
// ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
}
 
return 0;
1667,9 → 1740,9
DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
DE_PLANEA_FLIP_DONE_IVB;
DE_PLANEA_FLIP_DONE_IVB |
DE_AUX_CHANNEL_A_IVB;
u32 render_irqs;
u32 hotplug_mask;
 
dev_priv->irq_mask = ~display_mask;
 
1693,19 → 1766,8
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
 
hotplug_mask = (SDE_CRT_HOTPLUG_CPT |
SDE_PORTB_HOTPLUG_CPT |
SDE_PORTC_HOTPLUG_CPT |
SDE_PORTD_HOTPLUG_CPT);
dev_priv->pch_irq_mask = ~hotplug_mask;
ibx_irq_postinstall(dev);
 
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
 
// ironlake_enable_pch_hotplug(dev);
 
return 0;
}
 
1713,7 → 1775,6
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid;
1742,6 → 1803,9
// msid |= (1<<14);
// pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
I915_WRITE(VLV_IIR, 0xffffffff);
1750,6 → 1814,7
POSTING_READ(VLV_IER);
 
i915_enable_pipestat(dev_priv, 0, pipestat_enable);
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
i915_enable_pipestat(dev_priv, 1, pipestat_enable);
 
I915_WRITE(VLV_IIR, 0xffffffff);
1770,14 → 1835,22
#endif
 
I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
#if 0 /* FIXME: check register definitions; some have moved */
 
return 0;
}
 
static void valleyview_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
 
/* Note HDMI and DP share bits */
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
hotplug_en |= PORTB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
hotplug_en |= PORTC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
1786,11 → 1859,8
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
#endif
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 
return 0;
}
 
static void valleyview_irq_uninstall(struct drm_device *dev)
2022,28 → 2092,40
I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
I915_USER_INTERRUPT;
#if 0
 
if (I915_HAS_HOTPLUG(dev)) {
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
#endif
 
I915_WRITE(IMR, dev_priv->irq_mask);
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
 
// intel_opregion_enable_asle(dev);
 
return 0;
}
 
static void i915_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
 
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
#if 0
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
 
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
hotplug_en |= PORTB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
hotplug_en |= PORTC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
2052,15 → 2134,11
hotplug_en |= CRT_HOTPLUG_INT_EN;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
#endif
 
/* Ignore TV since it's buggy */
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}
 
// intel_opregion_enable_asle(dev);
 
return 0;
}
 
static irqreturn_t i915_irq_handler(int irq, void *arg)
2119,9 → 2197,9
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
// if (hotplug_status & dev_priv->hotplug_supported_mask)
// queue_work(dev_priv->wq,
// &dev_priv->hotplug_work);
if (hotplug_status & dev_priv->hotplug_supported_mask)
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
POSTING_READ(PORT_HOTPLUG_STAT);
2220,7 → 2298,6
static int i965_irq_postinstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
u32 enable_mask;
u32 error_mask;
 
2241,6 → 2318,7
 
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
 
/*
* Enable some error detection, note the instruction error mask
2261,15 → 2339,27
I915_WRITE(IER, enable_mask);
POSTING_READ(IER);
 
I915_WRITE(PORT_HOTPLUG_EN, 0);
POSTING_READ(PORT_HOTPLUG_EN);
 
// intel_opregion_enable_asle(dev);
 
return 0;
}
 
static void i965_hpd_irq_setup(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
u32 hotplug_en;
 
/* Note HDMI and DP share hotplug bits */
hotplug_en = 0;
#if 0
if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTB_HOTPLUG_INT_STATUS)
hotplug_en |= PORTB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTC_HOTPLUG_INT_STATUS)
hotplug_en |= PORTC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & PORTD_HOTPLUG_INT_STATUS)
hotplug_en |= PORTD_HOTPLUG_INT_EN;
if (IS_G4X(dev)) {
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
2292,14 → 2382,10
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
}
#endif
 
/* Ignore TV since it's buggy */
 
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 
// intel_opregion_enable_asle(dev);
 
return 0;
}
 
static irqreturn_t i965_irq_handler(int irq, void *arg)
2358,9 → 2444,9
 
DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
hotplug_status);
// if (hotplug_status & dev_priv->hotplug_supported_mask)
// queue_work(dev_priv->wq,
// &dev_priv->hotplug_work);
if (hotplug_status & dev_priv->hotplug_supported_mask)
queue_work(dev_priv->wq,
&dev_priv->hotplug_work);
 
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
I915_READ(PORT_HOTPLUG_STAT);
2395,6 → 2481,9
// if (blc_event || (iir & I915_ASLE_INTERRUPT))
// intel_opregion_asle_intr(dev);
 
if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
gmbus_irq_handler(dev);
 
/* With MSI, interrupts are only generated when iir
* transitions from zero to nonzero. If another bit got
* set while we were handling the existing iir bits, then
2445,20 → 2534,22
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 
// pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
 
 
if (IS_VALLEYVIEW(dev)) {
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
} else if (IS_IVYBRIDGE(dev)) {
dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup;
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
} else if (IS_HASWELL(dev)) {
/* Share interrupts handling with IVB */
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
} else if (HAS_PCH_SPLIT(dev)) {
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
2469,16 → 2560,25
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_handler = i915_irq_handler;
dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
} else {
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_handler = i965_irq_handler;
dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup;
}
}
}
 
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
void intel_hpd_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev);
}
 
 
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
 
/drivers/video/drm/i915/i915_reg.h
141,8 → 141,15
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)
 
#define VGA_SR_INDEX 0x3c4
#define VGA_SR_DATA 0x3c5
/*
* SR01 is the only VGA register touched on non-UMS setups.
* VLV doesn't do UMS, so the sequencer index/data registers
* are the only VGA registers which need to include
* display_mmio_offset.
*/
#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
#define SR01 1
#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
 
#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
301,6 → 308,7
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
#define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
#define PIPE_CONTROL_GLOBAL_GTT_IVB (1<<24) /* gen7+ */
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
#define PIPE_CONTROL_QW_WRITE (1<<14)
335,8 → 343,10
* 0x801c/3c: core clock bits
* 0x8048/68: low pass filter coefficients
* 0x8100: fast clock controls
*
* DPIO is VLV only.
*/
#define DPIO_PKT 0x2100
#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100)
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
343,9 → 353,9
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
#define DPIO_DATA 0x2104
#define DPIO_REG 0x2108
#define DPIO_CTL 0x2110
#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
556,13 → 566,13
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE 0x182060
#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
#define VLV_IMR 0x1820a8
#define VLV_ISR 0x1820ac
#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
735,6 → 745,7
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
921,8 → 932,8
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
#define _DPLL_A 0x06014
#define _DPLL_B 0x06018
#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
943,23 → 954,6
#define DPLL_LOCK_VLV (1<<15)
#define DPLL_INTEGRATED_CLOCK_VLV (1<<13)
 
#define SRX_INDEX 0x3c4
#define SRX_DATA 0x3c5
#define SR01 1
#define SR01_SCREEN_OFF (1<<5)
 
#define PPCR 0x61204
#define PPCR_ON (1<<0)
 
#define DVOB 0x61140
#define DVOB_ON (1<<31)
#define DVOC 0x61160
#define DVOC_ON (1<<31)
#define LVDS 0x61180
#define LVDS_ON (1<<31)
 
/* Scratch pad debug 0 reg:
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
/*
* The i830 generation, in LVDS mode, defines P1 as the bit number set within
998,7 → 992,7
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
#define _DPLL_A_MD 0x0601c /* 965+ only */
#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
1035,7 → 1029,7
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
 
#define _FPA0 0x06040
1178,7 → 1172,7
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
 
#define FW_BLC_SELF_VLV 0x6500
#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)
 
/*
1185,8 → 1179,8
* Palette regs
*/
 
#define _PALETTE_A 0x0a000
#define _PALETTE_B 0x0a800
#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
 
/* MCH MMIO space */
1242,6 → 1236,10
#define MAD_DIMM_A_SIZE_SHIFT 0
#define MAD_DIMM_A_SIZE_MASK (0xff << MAD_DIMM_A_SIZE_SHIFT)
 
/** snb MCH registers for priority tuning */
#define MCH_SSKPD (MCHBAR_MIRROR_BASE_SNB + 0x5d10)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
 
/* Clocking configuration register */
#define CLKCFG 0x10c00
1551,26 → 1549,26
*/
 
/* Pipe A timing regs */
#define _HTOTAL_A 0x60000
#define _HBLANK_A 0x60004
#define _HSYNC_A 0x60008
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
 
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
#define _HBLANK_B 0x61004
#define _HSYNC_B 0x61008
#define _VTOTAL_B 0x6100c
#define _VBLANK_B 0x61010
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define _VSYNCSHIFT_B 0x61028
#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
 
 
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
1615,9 → 1613,9
#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
#define ADPA_USE_VGA_HVPOLARITY (1<<15)
#define ADPA_SETS_HVPOLARITY 0
#define ADPA_VSYNC_CNTL_DISABLE (1<<11)
#define ADPA_VSYNC_CNTL_DISABLE (1<<10)
#define ADPA_VSYNC_CNTL_ENABLE 0
#define ADPA_HSYNC_CNTL_DISABLE (1<<10)
#define ADPA_HSYNC_CNTL_DISABLE (1<<11)
#define ADPA_HSYNC_CNTL_ENABLE 0
#define ADPA_VSYNC_ACTIVE_HIGH (1<<4)
#define ADPA_VSYNC_ACTIVE_LOW 0
1631,13 → 1629,10
 
 
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN 0x61110
#define HDMIB_HOTPLUG_INT_EN (1 << 29)
#define DPB_HOTPLUG_INT_EN (1 << 29)
#define HDMIC_HOTPLUG_INT_EN (1 << 28)
#define DPC_HOTPLUG_INT_EN (1 << 28)
#define HDMID_HOTPLUG_INT_EN (1 << 27)
#define DPD_HOTPLUG_INT_EN (1 << 27)
#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
#define PORTB_HOTPLUG_INT_EN (1 << 29)
#define PORTC_HOTPLUG_INT_EN (1 << 28)
#define PORTD_HOTPLUG_INT_EN (1 << 27)
#define SDVOB_HOTPLUG_INT_EN (1 << 26)
#define SDVOC_HOTPLUG_INT_EN (1 << 25)
#define TV_HOTPLUG_INT_EN (1 << 18)
1658,21 → 1653,14
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
 
#define PORT_HOTPLUG_STAT 0x61114
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
/* HDMI/DP bits are gen4+ */
#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
#define DPD_HOTPLUG_LIVE_STATUS (1 << 27)
#define DPD_HOTPLUG_INT_STATUS (3 << 21)
#define DPC_HOTPLUG_INT_STATUS (3 << 19)
#define DPB_HOTPLUG_INT_STATUS (3 << 17)
/* HDMI bits are shared with the DP bits */
#define HDMIB_HOTPLUG_LIVE_STATUS (1 << 29)
#define HDMIC_HOTPLUG_LIVE_STATUS (1 << 28)
#define HDMID_HOTPLUG_LIVE_STATUS (1 << 27)
#define HDMID_HOTPLUG_INT_STATUS (3 << 21)
#define HDMIC_HOTPLUG_INT_STATUS (3 << 19)
#define HDMIB_HOTPLUG_INT_STATUS (3 << 17)
#define PORTB_HOTPLUG_LIVE_STATUS (1 << 29)
#define PORTC_HOTPLUG_LIVE_STATUS (1 << 28)
#define PORTD_HOTPLUG_LIVE_STATUS (1 << 27)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTC_HOTPLUG_INT_STATUS (3 << 19)
#define PORTB_HOTPLUG_INT_STATUS (3 << 17)
/* CRT/TV common between gen3+ */
#define CRT_HOTPLUG_INT_STATUS (1 << 11)
#define TV_HOTPLUG_INT_STATUS (1 << 10)
1877,7 → 1865,7
#define PP_DIVISOR 0x61210
 
/* Panel fitting */
#define PFIT_CONTROL 0x61230
#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
1895,9 → 1883,7
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS 0x61234
#define PFIT_VERT_SCALE_MASK 0xfff00000
#define PFIT_HORIZ_SCALE_MASK 0x0000fff0
#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
1909,7 → 1895,7
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
 
#define PFIT_AUTO_RATIOS 0x61238
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
 
/* Backlight control */
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
2639,10 → 2625,10
/* Display & cursor control */
 
/* Pipe A */
#define _PIPEADSL 0x70000
#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
2671,11 → 2657,12
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
#define PIPECONF_BPP_10 (1<<5)
#define PIPECONF_BPP_6 (2<<5)
#define PIPECONF_BPP_12 (3<<5)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0<<5)
#define PIPECONF_10BPC (1<<5)
#define PIPECONF_6BPC (2<<5)
#define PIPECONF_12BPC (3<<5)
#define PIPECONF_DITHER_EN (1<<4)
#define PIPECONF_DITHER_TYPE_MASK (0x0000000c)
#define PIPECONF_DITHER_TYPE_SP (0<<2)
2682,7 → 2669,7
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
2693,7 → 2680,7
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
2703,7 → 2690,7
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
2719,11 → 2706,6
#define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */
#define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1)
#define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0)
#define PIPE_BPC_MASK (7 << 5) /* Ironlake */
#define PIPE_8BPC (0 << 5)
#define PIPE_10BPC (1 << 5)
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
 
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
2732,7 → 2714,7
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
 
#define VLV_DPFLIPSTAT 0x70028
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
2746,7 → 2728,7
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
 
#define DPINVGTT 0x7002c /* VLV only */
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
2774,7 → 2756,7
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
 
#define DSPFW1 0x70034
#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
2782,11 → 2764,11
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
2798,13 → 2780,13
/* drain latency register values*/
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
#define VLV_DDL1 0x70050
#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
#define VLV_DDL2 0x70054
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
2948,10 → 2930,10
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
#define _PIPEAFRAMEHIGH 0x70040
#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
#define _PIPEAFRAMEPIXEL 0x70044
#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
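The read loop sketched in the comment above (read high, read low, re-read high until stable) can be illustrated with this self-contained, hypothetical example; the register accessors are simulated and the 16/8-bit split follows the PIPE_FRAME_HIGH/LOW masks defined here.

/* Hypothetical user-space sketch; the real driver reads the
 * PIPEFRAME/PIPEFRAMEPIXEL registers via I915_READ. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hw_frame = 0x012345;   /* simulated 24-bit frame counter */

static uint32_t read_frame_high(void) { return (hw_frame >> 8) & 0xffff; } /* PIPE_FRAME_HIGH */
static uint32_t read_frame_low(void)  { return (hw_frame & 0xff) << 24; }  /* PIPE_FRAME_LOW */

static uint32_t read_frame(void)
{
	uint32_t high1, high2, low;

	/* Re-read until the high half is stable across the low-half read,
	 * so a carry between the two registers cannot tear the value. */
	do {
		high1 = read_frame_high();
		low   = (read_frame_low() >> 24) & 0xff;
		high2 = read_frame_high();
	} while (high1 != high2);

	return (high1 << 8) | low;
}

int main(void)
{
	printf("frame counter = 0x%06x\n", read_frame());
	return 0;
}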
2962,11 → 2944,12
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
 
/* Cursor A & B regs */
#define _CURACNTR 0x70080
#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
#define CURSOR_STRIDE_MASK 0x30000000
#define CURSOR_PIPE_CSC_ENABLE (1<<24)
#define CURSOR_FORMAT_SHIFT 24
#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
2983,16 → 2966,16
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
#define _CURBCNTR 0x700c0
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
 
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
3007,7 → 2990,7
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
 
/* Display A control */
#define _DSPACNTR 0x70180
#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
3028,6 → 3011,7
#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_PIPE_CSC_ENABLE (1<<24)
#define DISPPLANE_SEL_PIPE_SHIFT 24
#define DISPPLANE_SEL_PIPE_MASK (3<<DISPPLANE_SEL_PIPE_SHIFT)
#define DISPPLANE_SEL_PIPE_A 0
3040,14 → 3024,14
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
 
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
3068,44 → 3052,44
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
 
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
#define SWF02 0x71418
#define SWF03 0x7141c
#define SWF04 0x71420
#define SWF05 0x71424
#define SWF06 0x71428
#define SWF10 0x70410
#define SWF11 0x70414
#define SWF14 0x71420
#define SWF30 0x72414
#define SWF31 0x72418
#define SWF32 0x7241c
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)
 
/* Pipe B */
#define _PIPEBDSL 0x71000
#define _PIPEBCONF 0x71008
#define _PIPEBSTAT 0x71024
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_GM45 0x71040
#define _PIPEB_FLIPCOUNT_GM45 0x71044
 
 
/* Display B control */
#define _DSPBCNTR 0x71180
#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
#define _DSPBADDR 0x71184
#define _DSPBSTRIDE 0x71188
#define _DSPBPOS 0x7118C
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
#define _DSPBOFFSET 0x711A4
#define _DSPBSURFLIVE 0x711AC
#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)
 
/* Sprite A control */
#define _DVSACNTR 0x72180
3116,6 → 3100,7
#define DVS_FORMAT_RGBX101010 (1<<25)
#define DVS_FORMAT_RGBX888 (2<<25)
#define DVS_FORMAT_RGBX161616 (3<<25)
#define DVS_PIPE_CSC_ENABLE (1<<24)
#define DVS_SOURCE_KEY (1<<22)
#define DVS_RGB_ORDER_XBGR (1<<20)
#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
3183,7 → 3168,7
#define SPRITE_FORMAT_RGBX161616 (3<<25)
#define SPRITE_FORMAT_YUV444 (4<<25)
#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
#define SPRITE_CSC_ENABLE (1<<24)
#define SPRITE_PIPE_CSC_ENABLE (1<<24)
#define SPRITE_SOURCE_KEY (1<<22)
#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
3254,6 → 3239,8
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
 
#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
 
/* Ironlake */
 
#define CPU_VGACNTRL 0x41000
3294,41 → 3281,41
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
 
 
#define _PIPEA_DATA_M1 0x60030
#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
#define _PIPEA_DATA_N1 0x60034
#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
#define PIPE_DATA_N1_OFFSET 0
 
#define _PIPEA_DATA_M2 0x60038
#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
#define PIPE_DATA_M2_OFFSET 0
#define _PIPEA_DATA_N2 0x6003c
#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
#define PIPE_DATA_N2_OFFSET 0
 
#define _PIPEA_LINK_M1 0x60040
#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
#define PIPE_LINK_M1_OFFSET 0
#define _PIPEA_LINK_N1 0x60044
#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
#define PIPE_LINK_N1_OFFSET 0
 
#define _PIPEA_LINK_M2 0x60048
#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
#define PIPE_LINK_M2_OFFSET 0
#define _PIPEA_LINK_N2 0x6004c
#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
#define PIPE_LINK_N2_OFFSET 0
 
/* PIPEB timing regs are the same, starting at 0x61000 */
 
#define _PIPEB_DATA_M1 0x61030
#define _PIPEB_DATA_N1 0x61034
#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
 
#define _PIPEB_DATA_M2 0x61038
#define _PIPEB_DATA_N2 0x6103c
#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
 
#define _PIPEB_LINK_M1 0x61040
#define _PIPEB_LINK_N1 0x61044
#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
 
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
 
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
3581,9 → 3568,10
#define PORTD_PULSE_DURATION_6ms (2 << 18)
#define PORTD_PULSE_DURATION_100ms (3 << 18)
#define PORTD_PULSE_DURATION_MASK (3 << 18)
#define PORTD_HOTPLUG_NO_DETECT (0)
#define PORTD_HOTPLUG_STATUS_MASK (0x3 << 16)
#define PORTD_HOTPLUG_NO_DETECT (0 << 16)
#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
#define PORTD_HOTPLUG_LONG_DETECT (2 << 16)
#define PORTC_HOTPLUG_ENABLE (1 << 12)
#define PORTC_PULSE_DURATION_2ms (0)
#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
3590,9 → 3578,10
#define PORTC_PULSE_DURATION_6ms (2 << 10)
#define PORTC_PULSE_DURATION_100ms (3 << 10)
#define PORTC_PULSE_DURATION_MASK (3 << 10)
#define PORTC_HOTPLUG_NO_DETECT (0)
#define PORTC_HOTPLUG_STATUS_MASK (0x3 << 8)
#define PORTC_HOTPLUG_NO_DETECT (0 << 8)
#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
#define PORTC_HOTPLUG_LONG_DETECT (2 << 8)
#define PORTB_HOTPLUG_ENABLE (1 << 4)
#define PORTB_PULSE_DURATION_2ms (0)
#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
3599,9 → 3588,10
#define PORTB_PULSE_DURATION_6ms (2 << 2)
#define PORTB_PULSE_DURATION_100ms (3 << 2)
#define PORTB_PULSE_DURATION_MASK (3 << 2)
#define PORTB_HOTPLUG_NO_DETECT (0)
#define PORTB_HOTPLUG_STATUS_MASK (0x3 << 0)
#define PORTB_HOTPLUG_NO_DETECT (0 << 0)
#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
#define PORTB_HOTPLUG_LONG_DETECT (2 << 0)
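 
/* With the bits reworked above, each port's hotplug status in
* PCH_PORT_HOTPLUG is a two-bit field rather than two independent flags.
* A minimal decode sketch, assuming dig_hotplug_reg holds a value read
* from that register:
*
*   switch (dig_hotplug_reg & PORTD_HOTPLUG_STATUS_MASK) {
*   case PORTD_HOTPLUG_LONG_DETECT:  // full detect cycle needed
*   case PORTD_HOTPLUG_SHORT_DETECT: // e.g. DP short-pulse IRQ
*           break;
*   case PORTD_HOTPLUG_NO_DETECT:
*   default:
*           break;
*   }
*/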
 
#define PCH_GPIOA 0xc5010
#define PCH_GPIOB 0xc5014
3722,13 → 3712,13
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
 
#define VLV_VIDEO_DIP_CTL_B 0x61170
#define VLV_VIDEO_DIP_DATA_B 0x61174
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
 
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
3820,8 → 3810,6
#define TRANS_FSYNC_DELAY_HB2 (1<<27)
#define TRANS_FSYNC_DELAY_HB3 (2<<27)
#define TRANS_FSYNC_DELAY_HB4 (3<<27)
#define TRANS_DP_AUDIO_ONLY (1<<26)
#define TRANS_DP_VIDEO_AUDIO (0<<26)
#define TRANS_INTERLACE_MASK (7<<21)
#define TRANS_PROGRESSIVE (0<<21)
#define TRANS_INTERLACED (3<<21)
3927,7 → 3915,7
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
#define FDI_12BPC (3<<16)
#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
#define FDI_RX_LINK_REVERSAL_OVERRIDE (1<<15)
#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
#define FDI_RX_PLL_ENABLE (1<<13)
#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
4020,17 → 4008,17
#define LVDS_DETECTED (1 << 1)
 
/* vlv has 2 sets of panel control regs. */
#define PIPEA_PP_STATUS 0x61200
#define PIPEA_PP_CONTROL 0x61204
#define PIPEA_PP_ON_DELAYS 0x61208
#define PIPEA_PP_OFF_DELAYS 0x6120c
#define PIPEA_PP_DIVISOR 0x61210
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
 
#define PIPEB_PP_STATUS 0x61300
#define PIPEB_PP_CONTROL 0x61304
#define PIPEB_PP_ON_DELAYS 0x61308
#define PIPEB_PP_OFF_DELAYS 0x6130c
#define PIPEB_PP_DIVISOR 0x61310
#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
 
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
4211,7 → 4199,9
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
4280,8 → 4270,8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
#define GEN6_ENCODE_RC6_VID(mv) (((mv) / 5) - 245) < 0 ?: 0
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) > 0 ? ((vids) * 5) + 245 : 0)
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
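 
/* Quick sanity check that the corrected helpers are inverses of each other
* (illustrative arithmetic only):
*   GEN6_ENCODE_RC6_VID(450) = (450 - 245) / 5 = 41
*   GEN6_DECODE_RC6_VID(41)  = 41 * 5 + 245    = 450 (mV)
* which the old definitions above did not manage. */
 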
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
 
4322,7 → 4312,7
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
 
#define G4X_AUD_VID_DID 0x62020
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
4438,10 → 4428,10
#define AUDIO_CP_READY_C (1<<9)
 
/* HSW Power Wells */
#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
4524,6 → 4514,7
#define DDI_BUF_EMP_800MV_0DB_HSW (7<<24) /* Sel7 */
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_PORT_REVERSAL (1<<16)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
4657,4 → 4648,51
#define WM_DBG_DISALLOW_MAXFIFO (1<<1)
#define WM_DBG_DISALLOW_SPRITE (1<<2)
 
/* pipe CSC */
#define _PIPE_A_CSC_COEFF_RY_GY 0x49010
#define _PIPE_A_CSC_COEFF_BY 0x49014
#define _PIPE_A_CSC_COEFF_RU_GU 0x49018
#define _PIPE_A_CSC_COEFF_BU 0x4901c
#define _PIPE_A_CSC_COEFF_RV_GV 0x49020
#define _PIPE_A_CSC_COEFF_BV 0x49024
#define _PIPE_A_CSC_MODE 0x49028
#define _PIPE_A_CSC_PREOFF_HI 0x49030
#define _PIPE_A_CSC_PREOFF_ME 0x49034
#define _PIPE_A_CSC_PREOFF_LO 0x49038
#define _PIPE_A_CSC_POSTOFF_HI 0x49040
#define _PIPE_A_CSC_POSTOFF_ME 0x49044
#define _PIPE_A_CSC_POSTOFF_LO 0x49048
 
#define _PIPE_B_CSC_COEFF_RY_GY 0x49110
#define _PIPE_B_CSC_COEFF_BY 0x49114
#define _PIPE_B_CSC_COEFF_RU_GU 0x49118
#define _PIPE_B_CSC_COEFF_BU 0x4911c
#define _PIPE_B_CSC_COEFF_RV_GV 0x49120
#define _PIPE_B_CSC_COEFF_BV 0x49124
#define _PIPE_B_CSC_MODE 0x49128
#define _PIPE_B_CSC_PREOFF_HI 0x49130
#define _PIPE_B_CSC_PREOFF_ME 0x49134
#define _PIPE_B_CSC_PREOFF_LO 0x49138
#define _PIPE_B_CSC_POSTOFF_HI 0x49140
#define _PIPE_B_CSC_POSTOFF_ME 0x49144
#define _PIPE_B_CSC_POSTOFF_LO 0x49148
 
#define CSC_BLACK_SCREEN_OFFSET (1 << 2)
#define CSC_POSITION_BEFORE_GAMMA (1 << 1)
#define CSC_MODE_YUV_TO_RGB (1 << 0)
 
#define PIPE_CSC_COEFF_RY_GY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RY_GY, _PIPE_B_CSC_COEFF_RY_GY)
#define PIPE_CSC_COEFF_BY(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BY, _PIPE_B_CSC_COEFF_BY)
#define PIPE_CSC_COEFF_RU_GU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RU_GU, _PIPE_B_CSC_COEFF_RU_GU)
#define PIPE_CSC_COEFF_BU(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BU, _PIPE_B_CSC_COEFF_BU)
#define PIPE_CSC_COEFF_RV_GV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_RV_GV, _PIPE_B_CSC_COEFF_RV_GV)
#define PIPE_CSC_COEFF_BV(pipe) _PIPE(pipe, _PIPE_A_CSC_COEFF_BV, _PIPE_B_CSC_COEFF_BV)
#define PIPE_CSC_MODE(pipe) _PIPE(pipe, _PIPE_A_CSC_MODE, _PIPE_B_CSC_MODE)
#define PIPE_CSC_PREOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_HI, _PIPE_B_CSC_PREOFF_HI)
#define PIPE_CSC_PREOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_ME, _PIPE_B_CSC_PREOFF_ME)
#define PIPE_CSC_PREOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_PREOFF_LO, _PIPE_B_CSC_PREOFF_LO)
#define PIPE_CSC_POSTOFF_HI(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_HI, _PIPE_B_CSC_POSTOFF_HI)
#define PIPE_CSC_POSTOFF_ME(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_ME, _PIPE_B_CSC_POSTOFF_ME)
#define PIPE_CSC_POSTOFF_LO(pipe) _PIPE(pipe, _PIPE_A_CSC_POSTOFF_LO, _PIPE_B_CSC_POSTOFF_LO)
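 
/* Expansion sketch: _PIPE(pipe, a, b) picks the per-pipe variant as used
* elsewhere in this header, so e.g. PIPE_CSC_MODE(PIPE_A) resolves to
* _PIPE_A_CSC_MODE = 0x49028 and PIPE_CSC_MODE(PIPE_B) to
* _PIPE_B_CSC_MODE = 0x49128. */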
 
#endif /* _I915_REG_H_ */
/drivers/video/drm/i915/intel_crt.c
44,6 → 44,9
 
struct intel_crt {
struct intel_encoder base;
/* DPMS state is stored in the connector, which we need in the
* encoder's enable/disable callbacks */
struct intel_connector *connector;
bool force_hotplug_required;
u32 adpa_reg;
};
80,29 → 83,6
return true;
}
 
static void intel_disable_crt(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
 
temp = I915_READ(crt->adpa_reg);
temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
temp &= ~ADPA_DAC_ENABLE;
I915_WRITE(crt->adpa_reg, temp);
}
 
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 temp;
 
temp = I915_READ(crt->adpa_reg);
temp |= ADPA_DAC_ENABLE;
I915_WRITE(crt->adpa_reg, temp);
}
 
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
134,6 → 114,19
I915_WRITE(crt->adpa_reg, temp);
}
 
static void intel_disable_crt(struct intel_encoder *encoder)
{
intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
}
 
static void intel_enable_crt(struct intel_encoder *encoder)
{
struct intel_crt *crt = intel_encoder_to_crt(encoder);
 
intel_crt_set_dpms(encoder, crt->connector->base.dpms);
}
 
 
static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
struct drm_device *dev = connector->dev;
259,6 → 252,8
u32 adpa;
bool ret;
 
ENTER();
 
/* The first time through, trigger an explicit detection cycle */
if (crt->force_hotplug_required) {
bool turn_off_dac = HAS_PCH_SPLIT(dev);
266,7 → 261,7
 
crt->force_hotplug_required = 0;
 
save_adpa = adpa = I915_READ(PCH_ADPA);
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
273,20 → 268,20
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
 
I915_WRITE(PCH_ADPA, adpa);
I915_WRITE(crt->adpa_reg, adpa);
 
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
 
if (turn_off_dac) {
I915_WRITE(PCH_ADPA, save_adpa);
POSTING_READ(PCH_ADPA);
I915_WRITE(crt->adpa_reg, save_adpa);
POSTING_READ(crt->adpa_reg);
}
}
 
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
293,6 → 288,8
ret = false;
DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
 
LEAVE();
 
return ret;
}
 
299,26 → 296,29
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
u32 save_adpa;
 
save_adpa = adpa = I915_READ(ADPA);
ENTER();
 
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
 
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
 
I915_WRITE(ADPA, adpa);
I915_WRITE(crt->adpa_reg, adpa);
 
if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(ADPA, save_adpa);
I915_WRITE(crt->adpa_reg, save_adpa);
}
 
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(ADPA);
adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
329,6 → 329,8
/* FIXME: debug force function and remove */
ret = true;
 
LEAVE();
 
return ret;
}
 
348,6 → 350,8
bool ret = false;
int i, tries = 0;
 
ENTER();
 
if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
 
386,6 → 390,8
/* and put the bits back */
I915_WRITE(PORT_HOTPLUG_EN, orig);
 
LEAVE();
 
return ret;
}
 
394,6 → 400,8
{
struct edid *edid;
 
ENTER();
 
edid = drm_get_edid(connector, i2c);
 
if (!edid && !intel_gmbus_is_forced_bit(i2c)) {
403,6 → 411,8
intel_gmbus_force_bit(i2c, false);
}
 
LEAVE();
 
return edid;
}
 
664,11 → 674,11
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
 
adpa = I915_READ(PCH_ADPA);
adpa = I915_READ(crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
I915_WRITE(PCH_ADPA, adpa);
POSTING_READ(PCH_ADPA);
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);
 
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
683,7 → 693,6
static const struct drm_encoder_helper_funcs crt_encoder_funcs = {
.mode_fixup = intel_crt_mode_fixup,
.mode_set = intel_crt_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_crt_connector_funcs = {
723,6 → 732,7
}
 
connector = &intel_connector->base;
crt->connector = intel_connector;
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
753,7 → 763,7
 
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
if (IS_HASWELL(dev))
if (HAS_DDI(dev))
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
crt->base.get_hw_state = intel_crt_get_hw_state;
777,10 → 787,14
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
 
/*
* TODO: find a proper way to discover whether we need to set the
* polarity reversal bit or not, instead of relying on the BIOS.
* TODO: find a proper way to discover whether we need to set the
* polarity and link reversal bits or not, instead of relying on the
* BIOS.
*/
if (HAS_PCH_LPT(dev))
dev_priv->fdi_rx_polarity_reversed =
!!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
if (HAS_PCH_LPT(dev)) {
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
 
dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config;
}
}
/drivers/video/drm/i915/intel_ddi.c
84,7 → 84,8
* in either FDI or DP modes only, as HDMI connections will work with both
* of those
*/
void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port, bool use_fdi_mode)
static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port,
bool use_fdi_mode)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
114,17 → 115,18
{
int port;
 
if (IS_HASWELL(dev)) {
if (!HAS_DDI(dev))
return;
 
for (port = PORT_A; port < PORT_E; port++)
intel_prepare_ddi_buffers(dev, port, false);
 
/* DDI E is the suggested one to work in FDI mode, so program it as such by
* default. It will have to be re-programmed in case a digital DP output
* is detected on it
/* DDI E is the suggested one to work in FDI mode, so program it as such
* by default. It will have to be re-programmed in case a digital DP
* output is detected on it
*/
intel_prepare_ddi_buffers(dev, PORT_E, true);
}
}
 
static const long hsw_ddi_buf_ctl_values[] = {
DDI_BUF_EMP_400MV_0DB_HSW,
178,10 → 180,8
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
((intel_crtc->fdi_lanes - 1) << 19);
if (dev_priv->fdi_rx_polarity_reversed)
rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE | ((intel_crtc->fdi_lanes - 1) << 19);
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
203,7 → 203,10
DP_TP_CTL_LINK_TRAIN_PAT1 |
DP_TP_CTL_ENABLE);
 
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
* DDI E does not support port reversal; the functionality is
* achieved on the PCH side in FDI_RX_CTL, so no need to set the
* port reversal bit */
I915_WRITE(DDI_BUF_CTL(PORT_E),
DDI_BUF_CTL_ENABLE |
((intel_crtc->fdi_lanes - 1) << 1) |
675,10 → 678,14
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
 
intel_crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
 
intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
intel_dp->DP = intel_dig_port->port_reversal |
DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DDI_PORT_WIDTH_X1;
985,7 → 992,13
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
/* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
if (dev_priv->pch_pf_size)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
break;
case PIPE_B:
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
1069,7 → 1082,7
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = pipe;
cpu_transcoder = (enum transcoder) pipe;
 
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
1285,28 → 1298,48
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t tmp;
 
if (type == INTEL_OUTPUT_HDMI) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
 
/* In HDMI/DVI mode, the port width and swing/emphasis values
* are ignored, so nothing special needs to be done besides
* enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
I915_WRITE(DDI_BUF_CTL(port),
intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
ironlake_edp_backlight_on(intel_dp);
}
 
if (intel_crtc->eld_vld) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
}
 
static void intel_disable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1313,6 → 1346,10
 
ironlake_edp_backlight_off(intel_dp);
}
 
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
 
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1354,8 → 1391,8
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
enum port port = intel_dig_port->port;
bool wait;
uint32_t val;
bool wait = false;
 
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
1452,11 → 1489,11
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_fixup = intel_ddi_mode_fixup,
.mode_set = intel_ddi_mode_set,
.disable = intel_encoder_noop,
};
 
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
1497,6 → 1534,8
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
 
intel_dig_port->port = port;
intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL;
if (hdmi_connector)
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
else
/drivers/video/drm/i915/intel_display.c
166,8 → 166,8
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
.m1 = { .min = 10, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.m1 = { .min = 8, .max = 18 },
.m2 = { .min = 3, .max = 7 },
.p = { .min = 5, .max = 80 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 200000,
180,8 → 180,8
.vco = { .min = 1400000, .max = 2800000 },
.n = { .min = 1, .max = 6 },
.m = { .min = 70, .max = 120 },
.m1 = { .min = 10, .max = 22 },
.m2 = { .min = 5, .max = 9 },
.m1 = { .min = 8, .max = 18 },
.m2 = { .min = 3, .max = 7 },
.p = { .min = 7, .max = 98 },
.p1 = { .min = 1, .max = 8 },
.p2 = { .dot_limit = 112000,
428,13 → 428,11
 
u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
{
unsigned long flags;
u32 val = 0;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
goto out_unlock;
return 0;
}
 
I915_WRITE(DPIO_REG, reg);
442,24 → 440,20
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO read wait timed out\n");
goto out_unlock;
return 0;
}
val = I915_READ(DPIO_DATA);
 
out_unlock:
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
return val;
return I915_READ(DPIO_DATA);
}
 
static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
u32 val)
{
unsigned long flags;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
DRM_ERROR("DPIO idle wait timed out\n");
goto out_unlock;
return;
}
 
I915_WRITE(DPIO_DATA, val);
468,9 → 462,6
DPIO_BYTE);
if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
DRM_ERROR("DPIO write wait timed out\n");
 
out_unlock:
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
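 
/* With the spinlock gone, callers are now expected to hold dpio_lock around
* these helpers themselves. A minimal usage sketch (reg and some_bit are
* placeholders for whatever DPIO register and bit the caller cares about):
*
*   mutex_lock(&dev_priv->dpio_lock);
*   val = intel_dpio_read(dev_priv, reg);
*   intel_dpio_write(dev_priv, reg, val | some_bit);
*   mutex_unlock(&dev_priv->dpio_lock);
*/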
 
static void vlv_init_dpio(struct drm_device *dev)
484,61 → 475,14
POSTING_READ(DPIO_CTL);
}
 
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
return 1;
}
 
static const struct dmi_system_id intel_dual_link_lvds[] = {
{
.callback = intel_dual_link_lvds_callback,
.ident = "Apple MacBook Pro (Core i5/i7 Series)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
},
},
{ } /* terminating entry */
};
 
static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
unsigned int reg)
{
unsigned int val;
 
/* use the module option value if specified */
if (i915_lvds_channel_mode > 0)
return i915_lvds_channel_mode == 2;
 
// if (dmi_check_system(intel_dual_link_lvds))
// return true;
 
if (dev_priv->lvds_val)
val = dev_priv->lvds_val;
else {
/* BIOS should set the proper LVDS register value at boot, but
* in reality, it doesn't set the value when the lid is closed;
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
val = I915_READ(reg);
if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
val = dev_priv->bios_lvds_val;
dev_priv->lvds_val = val;
}
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
 
static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
if (intel_is_dual_link_lvds(dev)) {
/* LVDS dual channel */
if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
562,11 → 506,10
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if (is_dual_link_lvds(dev_priv, LVDS))
if (intel_is_dual_link_lvds(dev))
/* LVDS with dual channel */
limit = &intel_limits_g4x_dual_channel_lvds;
else
698,19 → 641,16
 
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int err = target;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
(I915_READ(LVDS)) != 0) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
/*
* For LVDS, if the panel is on, just rely on its current
* settings for dual-channel. We haven't figured out how to
* reliably set up different single/dual channel state, if we
* even can.
* For LVDS just rely on its current settings for dual-channel.
* We haven't figured out how to reliably set up different
* single/dual channel state, if we even can.
*/
if (is_dual_link_lvds(dev_priv, LVDS))
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
763,7 → 703,6
intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
778,8 → 717,7
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
if (intel_is_dual_link_lvds(dev))
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
1059,6 → 997,51
}
}
 
/*
* ibx_digital_port_connected - is the specified port connected?
* @dev_priv: i915 private structure
* @port: the port to test
*
* Returns true if @port is connected, false otherwise.
*/
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port)
{
u32 bit;
 
if (HAS_PCH_IBX(dev_priv->dev)) {
switch(port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG;
break;
case PORT_C:
bit = SDE_PORTC_HOTPLUG;
break;
case PORT_D:
bit = SDE_PORTD_HOTPLUG;
break;
default:
return true;
}
} else {
switch(port->port) {
case PORT_B:
bit = SDE_PORTB_HOTPLUG_CPT;
break;
case PORT_C:
bit = SDE_PORTC_HOTPLUG_CPT;
break;
case PORT_D:
bit = SDE_PORTD_HOTPLUG_CPT;
break;
default:
return true;
}
}
 
return I915_READ(SDEISR) & bit;
}
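 
/* A usage sketch with a hypothetical caller, e.g. a DP detect path on a PCH
* platform, checking the live SDEISR status before doing any AUX traffic:
*
*   if (HAS_PCH_SPLIT(dev) &&
*       !ibx_digital_port_connected(dev_priv, intel_dig_port))
*           return connector_status_disconnected;
*/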
 
static const char *state_string(bool enabled)
{
return enabled ? "on" : "off";
1137,8 → 1120,8
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
 
if (IS_HASWELL(dev_priv->dev)) {
/* On Haswell, DDI is used instead of FDI_TX_CTL */
if (HAS_DDI(dev_priv->dev)) {
/* DDI does not have a specific FDI_TX register */
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
1182,7 → 1165,7
return;
 
/* On Haswell, DDI ports are responsible for the FDI PLL setup */
if (IS_HASWELL(dev_priv->dev))
if (HAS_DDI(dev_priv->dev))
return;
 
reg = FDI_TX_CTL(pipe);
1243,9 → 1226,15
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
 
if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
!(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
cur_state = false;
} else {
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
}
 
WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
1521,13 → 1510,14
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
unsigned long flags;
u32 tmp;
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
return;
}
 
I915_WRITE(SBI_ADDR, (reg << 16));
1542,11 → 1532,8
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
goto out_unlock;
return;
}
 
out_unlock:
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
}
 
static u32
1553,13 → 1540,13
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
unsigned long flags;
u32 value = 0;
WARN_ON(!mutex_is_locked(&dev_priv->dpio_lock));
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
return 0;
}
 
I915_WRITE(SBI_ADDR, (reg << 16));
1573,14 → 1560,10
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
goto out_unlock;
return 0;
}
 
value = I915_READ(SBI_DATA);
 
out_unlock:
spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
return value;
return I915_READ(SBI_DATA);
}
 
/**
1712,8 → 1695,8
* make the BPC in transcoder be consistent with
* that in pipeconf reg.
*/
val &= ~PIPE_BPC_MASK;
val |= pipeconf_val & PIPE_BPC_MASK;
val &= ~PIPECONF_BPC_MASK;
val |= pipeconf_val & PIPECONF_BPC_MASK;
}
 
val &= ~TRANS_INTERLACE_MASK;
1740,7 → 1723,7
BUG_ON(dev_priv->info->gen < 5);
 
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
 
/* Workaround: set timing override bit. */
1828,11 → 1811,11
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum transcoder pch_transcoder;
enum pipe pch_transcoder;
int reg;
u32 val;
 
if (IS_HASWELL(dev_priv->dev))
if (HAS_PCH_LPT(dev_priv->dev))
pch_transcoder = TRANSCODER_A;
else
pch_transcoder = pipe;
1848,7 → 1831,8
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
assert_fdi_tx_pll_enabled(dev_priv,
(enum pipe) cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
2005,7 → 1989,12
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
ret = i915_gem_object_get_fence(obj);
if (ret)
goto err_unpin;
 
i915_gem_object_pin_fence(obj);
 
dev_priv->mm.interruptible = true;
return 0;
 
2024,19 → 2013,30
 
/* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
* is assumed to be a power of two. */
unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
unsigned int bpp,
unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
unsigned int cpp,
unsigned int pitch)
{
int tile_rows, tiles;
if (tiling_mode != I915_TILING_NONE) {
unsigned int tile_rows, tiles;
 
tile_rows = *y / 8;
*y %= 8;
tiles = *x / (512/bpp);
*x %= 512/bpp;
 
tiles = *x / (512/cpp);
*x %= 512/cpp;
 
return tile_rows * pitch * 8 + tiles * 4096;
} else {
unsigned int offset;
 
offset = *y * pitch + *x * cpp;
*y = 0;
*x = (offset & 4095) / cpp;
return offset & -4096;
}
}
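 
/* A worked example with made-up numbers (cpp = 4, pitch = 8192, x = 100,
* y = 50), just to show what the helper hands back:
*   tiled:  tile_rows = 50/8 = 6, y -> 2; tiles = 100/(512/4) = 0, x -> 100;
*           returns 6 * 8192 * 8 = 0x60000
*   linear: offset = 50*8192 + 100*4 = 410000; returns 409600 (page aligned),
*           x -> 100, y -> 0
*/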
 
static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y)
2112,7 → 2112,7
 
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
fb->bits_per_pixel / 8,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
2193,9 → 2193,9
return -EINVAL;
}
 
// if (obj->tiling_mode != I915_TILING_NONE)
// dspcntr |= DISPPLANE_TILED;
// else
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
 
/* must disable */
2205,7 → 2205,7
 
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
fb->bits_per_pixel / 8,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
2250,10 → 2250,6
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
 
wait_event(dev_priv->pending_flip_queue,
atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
 
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
2330,43 → 2326,6
return 0;
}
 
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
 
if (clock < 200000) {
u32 temp;
dpa_ctl |= DP_PLL_FREQ_160MHZ;
/* workaround for 160Mhz:
1) program 0x4600c bits 15:0 = 0x8124
2) program 0x46010 bit 0 = 1
3) program 0x46034 bit 24 = 1
4) program 0x64000 bit 14 = 1
*/
temp = I915_READ(0x4600c);
temp &= 0xffff0000;
I915_WRITE(0x4600c, temp | 0x8124);
 
temp = I915_READ(0x46010);
I915_WRITE(0x46010, temp | 1);
 
temp = I915_READ(0x46034);
I915_WRITE(0x46034, temp | (1 << 24));
} else {
dpa_ctl |= DP_PLL_FREQ_270MHZ;
}
I915_WRITE(DP_A, dpa_ctl);
 
POSTING_READ(DP_A);
udelay(500);
}
 
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
2795,7 → 2754,7
temp = I915_READ(reg);
temp &= ~((0x7 << 19) | (0x7 << 16));
temp |= (intel_crtc->fdi_lanes - 1) << 19;
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
 
POSTING_READ(reg);
2808,9 → 2767,6
POSTING_READ(reg);
udelay(200);
 
/* On Haswell, the PLL configuration for ports and pipes is handled
* separately, as part of DDI setup */
if (!IS_HASWELL(dev)) {
/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
2821,7 → 2777,6
udelay(100);
}
}
}
 
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
2869,7 → 2824,7
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(0x7 << 16);
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
 
POSTING_READ(reg);
2898,7 → 2853,7
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
I915_WRITE(reg, temp);
 
POSTING_READ(reg);
2909,10 → 2864,12
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
unsigned long flags;
bool pending;
 
if (atomic_read(&dev_priv->mm.wedged))
if (i915_reset_in_progress(&dev_priv->gpu_error) ||
intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
return false;
 
spin_lock_irqsave(&dev->event_lock, flags);
2931,6 → 2888,8
if (crtc->fb == NULL)
return;
 
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
 
wait_event(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc));
 
2974,6 → 2933,8
u32 divsel, phaseinc, auxdiv, phasedir = 0;
u32 temp;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* It is necessary to ungate the pixclk gate prior to programming
* the divisors, and gate it back when it is done.
*/
3048,6 → 3009,8
udelay(24);
 
I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
/*
3128,7 → 3091,7
if (HAS_PCH_CPT(dev) &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
3602,7 → 3565,7
 
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
* start using it. */
intel_crtc->cpu_transcoder = intel_crtc->pipe;
intel_crtc->cpu_transcoder = (enum transcoder) intel_crtc->pipe;
 
intel_ddi_put_crtc_pll(crtc);
}
3625,6 → 3588,30
*/
}
 
/**
* g4x_fixup_plane - ugly workaround for G45 to fire up the hardware
* cursor plane briefly if not already running after enabling the display
* plane.
* This workaround avoids occasional blank screens when self refresh is
* enabled.
*/
static void
g4x_fixup_plane(struct drm_i915_private *dev_priv, enum pipe pipe)
{
u32 cntl = I915_READ(CURCNTR(pipe));
 
if ((cntl & CURSOR_MODE) == 0) {
u32 fw_bcl_self = I915_READ(FW_BLC_SELF);
 
I915_WRITE(FW_BLC_SELF, fw_bcl_self & ~FW_BLC_SELF_EN);
I915_WRITE(CURCNTR(pipe), CURSOR_MODE_64_ARGB_AX);
intel_wait_for_vblank(dev_priv->dev, pipe);
I915_WRITE(CURCNTR(pipe), cntl);
I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
I915_WRITE(FW_BLC_SELF, fw_bcl_self);
}
}
 
static void i9xx_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3643,8 → 3630,15
intel_update_watermarks(dev);
 
intel_enable_pll(dev_priv, pipe);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
intel_enable_pipe(dev_priv, pipe, false);
intel_enable_plane(dev_priv, plane, pipe);
if (IS_G4X(dev))
g4x_fixup_plane(dev_priv, pipe);
 
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
3665,6 → 3659,7
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 pctl;
 
 
if (!intel_crtc->active)
3684,6 → 3679,13
 
intel_disable_plane(dev_priv, plane, pipe);
intel_disable_pipe(dev_priv, pipe);
 
/* Disable panel fitter if it is on this pipe. */
pctl = I915_READ(PFIT_CONTROL);
if ((pctl & PFIT_ENABLE) &&
((pctl & PFIT_PIPE_MASK) >> PFIT_PIPE_SHIFT) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
 
intel_disable_pll(dev_priv, pipe);
 
intel_crtc->active = false;
3750,19 → 3752,17
intel_crtc_update_sarea(crtc, enable);
}
 
static void intel_crtc_noop(struct drm_crtc *crtc)
{
}
 
static void intel_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
 
intel_crtc->eld_vld = false;
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
3800,10 → 3800,6
}
}
 
void intel_encoder_noop(struct drm_encoder *encoder)
{
}
 
void intel_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3995,16 → 3991,8
return 133000;
}
 
struct fdi_m_n {
u32 tu;
u32 gmch_m;
u32 gmch_n;
u32 link_m;
u32 link_n;
};
 
static void
fdi_reduce_ratio(u32 *num, u32 *den)
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
while (*num > 0xffffff || *den > 0xffffff) {
*num >>= 1;
4012,20 → 4000,18
}
}
 
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
int link_clock, struct fdi_m_n *m_n)
void
intel_link_compute_m_n(int bits_per_pixel, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n)
{
m_n->tu = 64; /* default size */
 
/* BUG_ON(pixel_clock > INT_MAX / 36); */
m_n->tu = 64;
m_n->gmch_m = bits_per_pixel * pixel_clock;
m_n->gmch_n = link_clock * nlanes * 8;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
 
intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
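 
/* Illustrative numbers (not from any particular mode): bits_per_pixel = 24,
* nlanes = 4, pixel_clock = 148500, link_clock = 270000 give
*   gmch_m = 24 * 148500    = 3564000
*   gmch_n = 270000 * 4 * 8 = 8640000
*   link_m = 148500, link_n = 270000
* and since everything already fits in 24 bits, intel_reduce_ratio() leaves
* the values untouched; it only shifts both terms right until they fit. */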
 
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
4272,51 → 4258,6
}
}
 
static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 temp;
 
temp = I915_READ(LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (pipe == 1) {
temp |= LVDS_PIPEB_SELECT;
} else {
temp &= ~LVDS_PIPEB_SELECT;
}
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock->p2 == 7)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
 
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
/* set the dithering flag on LVDS as needed */
if (INTEL_INFO(dev)->gen >= 4) {
if (dev_priv->lvds_dither)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(LVDS, temp);
}
 
static void vlv_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
4332,6 → 4273,8
bool is_sdvo;
u32 temp;
 
mutex_lock(&dev_priv->dpio_lock);
 
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
4415,6 → 4358,8
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
}
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
static void i9xx_update_pll(struct drm_crtc *crtc,
4426,6 → 4371,7
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 dpll;
bool is_sdvo;
4494,12 → 4440,9
POSTING_READ(DPLL(pipe));
udelay(150);
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
intel_update_lvds(crtc, clock, adjusted_mode);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_dp_set_m_n(crtc, mode, adjusted_mode);
4538,6 → 4481,7
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
u32 dpll;
 
4571,12 → 4515,9
POSTING_READ(DPLL(pipe));
udelay(150);
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
intel_update_lvds(crtc, clock, adjusted_mode);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
I915_WRITE(DPLL(pipe), dpll);
 
4766,10 → 4707,10
}
 
/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
pipeconf &= ~(PIPECONF_BPC_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
pipeconf |= PIPECONF_6BPC |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
4777,7 → 4718,7
 
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
pipeconf |= PIPECONF_6BPC |
PIPECONF_ENABLE |
I965_PIPECONF_ACTIVE;
}
4964,6 → 4905,8
if (!has_vga)
return;
 
mutex_lock(&dev_priv->dpio_lock);
 
/* XXX: Rip out SDV support once Haswell ships for real. */
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
is_sdv = true;
5106,6 → 5049,8
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
tmp |= SBI_DBUFF0_ENABLE;
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
 
mutex_unlock(&dev_priv->dpio_lock);
}
 
/*
5160,19 → 5105,19
 
val = I915_READ(PIPECONF(pipe));
 
val &= ~PIPE_BPC_MASK;
val &= ~PIPECONF_BPC_MASK;
switch (intel_crtc->bpp) {
case 18:
val |= PIPE_6BPC;
val |= PIPECONF_6BPC;
break;
case 24:
val |= PIPE_8BPC;
val |= PIPECONF_8BPC;
break;
case 30:
val |= PIPE_10BPC;
val |= PIPECONF_10BPC;
break;
case 36:
val |= PIPE_12BPC;
val |= PIPECONF_12BPC;
break;
default:
/* Case prevented by intel_choose_pipe_bpp_dither. */
5189,10 → 5134,80
else
val |= PIPECONF_PROGRESSIVE;
 
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
val |= PIPECONF_COLOR_RANGE_SELECT;
else
val &= ~PIPECONF_COLOR_RANGE_SELECT;
 
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
 
/*
* Set up the pipe CSC unit.
*
* Currently only full range RGB to limited range RGB conversion
* is supported, but eventually this should handle various
* RGB<->YCbCr scenarios as well.
*/
static void intel_set_pipe_csc(struct drm_crtc *crtc,
const struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
uint16_t coeff = 0x7800; /* 1.0 */
 
/*
* TODO: Check what kind of values actually come out of the pipe
* with these coeff/postoff values and adjust to get the best
* accuracy. Perhaps we even need to take the bpc value into
* consideration.
*/
 
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
coeff = ((235 - 16) * (1 << 12) / 255) & 0xff8; /* 0.xxx... */
 
/*
* GY/GU and RY/RU should be the other way around according
* to BSpec, but reality doesn't agree. Just set them up in
* a way that results in the correct picture.
*/
I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff << 16);
I915_WRITE(PIPE_CSC_COEFF_BY(pipe), 0);
 
I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff);
I915_WRITE(PIPE_CSC_COEFF_BU(pipe), 0);
 
I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), 0);
I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff << 16);
 
I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), 0);
I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), 0);
 
if (INTEL_INFO(dev)->gen > 6) {
uint16_t postoff = 0;
 
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
postoff = (16 * (1 << 13) / 255) & 0x1fff;
 
I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff);
I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff);
I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff);
 
I915_WRITE(PIPE_CSC_MODE(pipe), 0);
} else {
uint32_t mode = CSC_MODE_YUV_TO_RGB;
 
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
mode |= CSC_BLACK_SCREEN_OFFSET;
 
I915_WRITE(PIPE_CSC_MODE(pipe), mode);
}
}
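 
/* Rough arithmetic behind the limited-range path above (illustrative only):
*   coeff   = ((235 - 16) * (1 << 12) / 255) & 0xff8  = 0xdb8, i.e. a scale
*             factor of roughly (235 - 16) / 255 ~= 0.86
*   postoff = (16 * (1 << 13) / 255) & 0x1fff         = 0x202, the offset
*             that lifts full-range black up to the limited-range 16 level
*/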
 
static void haswell_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
5383,7 → 5398,7
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct intel_encoder *intel_encoder, *edp_encoder = NULL;
struct fdi_m_n m_n = {0};
struct intel_link_m_n m_n = {0};
int target_clock, pixel_multiplier, lane, link_bw;
bool is_dp = false, is_cpu_edp = false;
 
5435,8 → 5450,7
 
if (pixel_multiplier > 1)
link_bw *= pixel_multiplier;
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
intel_link_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, &m_n);
 
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
5489,7 → 5503,7
if (is_lvds) {
if ((intel_panel_use_ssc(dev_priv) &&
dev_priv->lvds_ssc_freq == 100) ||
(I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
intel_is_dual_link_lvds(dev))
factor = 25;
} else if (is_sdvo && is_tv)
factor = 20;
5564,7 → 5578,6
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
u32 temp;
int ret;
bool dither, fdi_config_ok;
 
5628,55 → 5641,13
} else
intel_put_pch_pll(intel_crtc);
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (HAS_PCH_CPT(dev)) {
temp &= ~PORT_TRANS_SEL_MASK;
temp |= PORT_TRANS_SEL_CPT(pipe);
} else {
if (pipe == 1)
temp |= LVDS_PIPEB_SELECT;
else
temp &= ~LVDS_PIPEB_SELECT;
}
if (is_dp && !is_cpu_edp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
 
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (clock.p2 == 7)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_pll_enable)
encoder->pre_pll_enable(encoder);
 
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(PCH_LVDS, temp);
}
 
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
/* For non-DP output, clear any trans DP clock recovery setting.*/
I915_WRITE(TRANSDATA_M1(pipe), 0);
I915_WRITE(TRANSDATA_N1(pipe), 0);
I915_WRITE(TRANSDPLINK_M1(pipe), 0);
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
 
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 
5710,9 → 5681,6
 
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
intel_wait_for_vblank(dev, pipe);
5730,6 → 5698,35
return fdi_config_ok ? ret : -EINVAL;
}
 
static void haswell_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable = false;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (crtc->pipe != PIPE_A && crtc->base.enabled)
enable = true;
/* XXX: Should check for the eDP transcoder here, but thanks to the init
* sequence that's not yet available. Just in case desktop eDP
* on PORT D is possible on haswell, too. */
}
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->type != INTEL_OUTPUT_EDP &&
encoder->connectors_active)
enable = true;
}
 
/* Even the eDP panel fitter is outside the always-on well. */
if (dev_priv->pch_pf_size)
enable = true;
 
intel_set_power_well(dev, enable);
}
 
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
5742,20 → 5739,13
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dp = false, is_cpu_edp = false;
bool is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
u32 temp;
int ret;
bool dither;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
5789,16 → 5779,6
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
return -EINVAL;
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
&has_reduced_clock,
&reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
5805,131 → 5785,26
/* determine panel color depth */
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
adjusted_mode);
if (is_lvds && dev_priv->lvds_dither)
dither = true;
 
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
 
dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
fp);
 
/* CPU eDP is the only output that doesn't need a PCH PLL of its
* own on pre-Haswell/LPT generation */
if (!is_cpu_edp) {
struct intel_pch_pll *pll;
 
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
pipe);
return -EINVAL;
}
} else
intel_put_pch_pll(intel_crtc);
 
/* The LVDS pin pair needs to be on before the DPLLs are
* enabled. This is an exception to the general rule that
* mode_set doesn't turn things on.
*/
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (HAS_PCH_CPT(dev)) {
temp &= ~PORT_TRANS_SEL_MASK;
temp |= PORT_TRANS_SEL_CPT(pipe);
} else {
if (pipe == 1)
temp |= LVDS_PIPEB_SELECT;
else
temp &= ~LVDS_PIPEB_SELECT;
}
 
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether
* we're going to set the DPLLs for dual-channel mode or
* not.
*/
if (clock.p2 == 7)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP |
LVDS_CLKB_POWER_UP);
 
/* It would be nice to set 24 vs 18-bit mode
* (LVDS_A3_POWER_UP) appropriately here, but we need to
* look more thoroughly into how panels behave in the
* two modes.
*/
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(PCH_LVDS, temp);
}
}
 
if (is_dp && !is_cpu_edp) {
if (is_dp && !is_cpu_edp)
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
/* For non-DP output, clear any trans DP clock recovery
* setting.*/
I915_WRITE(TRANSDATA_M1(pipe), 0);
I915_WRITE(TRANSDATA_N1(pipe), 0);
I915_WRITE(TRANSDPLINK_M1(pipe), 0);
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
}
 
intel_crtc->lowfreq_avail = false;
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
 
if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
} else {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
}
}
}
 
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
if (!is_dp || is_cpu_edp)
ironlake_set_m_n(crtc, mode, adjusted_mode);
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 
haswell_set_pipeconf(crtc, adjusted_mode, dither);
 
intel_set_pipe_csc(crtc, adjusted_mode);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE | DISPPLANE_PIPE_CSC_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
ret = intel_pipe_set_base(crtc, x, y, fb);
6051,6 → 5926,7
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t eldv;
uint32_t i;
int len;
6092,6 → 5968,7
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
 
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
intel_crtc->eld_vld = true;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
6328,6 → 6205,8
cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
cntl |= CURSOR_MODE_DISABLE;
}
if (IS_HASWELL(dev))
cntl |= CURSOR_PIPE_CSC_ENABLE;
I915_WRITE(CURCNTR_IVB(pipe), cntl);
 
intel_crtc->cursor_visible = visible;
6685,6 → 6564,8
if (encoder->crtc) {
crtc = encoder->crtc;
 
mutex_lock(&crtc->mutex);
 
old->dpms_mode = connector->dpms;
old->load_detect_temp = false;
 
6714,6 → 6595,7
return false;
}
 
mutex_lock(&crtc->mutex);
intel_encoder->new_crtc = to_intel_crtc(crtc);
to_intel_connector(connector)->new_encoder = intel_encoder;
 
6741,13 → 6623,15
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
mutex_unlock(&crtc->mutex);
return false;
}
 
if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
if (intel_set_mode(crtc, mode, 0, 0, fb)) {
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
mutex_unlock(&crtc->mutex);
return false;
}
 
6762,6 → 6646,7
struct intel_encoder *intel_encoder =
intel_attached_encoder(connector);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
 
DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
connector->base.id, drm_get_connector_name(connector),
6768,15 → 6653,16
encoder->base.id, drm_get_encoder_name(encoder));
 
if (old->load_detect_temp) {
struct drm_crtc *crtc = encoder->crtc;
 
to_intel_connector(connector)->new_encoder = NULL;
intel_encoder->new_crtc = NULL;
intel_set_mode(crtc, NULL, 0, 0, NULL);
 
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
if (old->release_fb) {
drm_framebuffer_unregister_private(old->release_fb);
drm_framebuffer_unreference(old->release_fb);
}
 
mutex_unlock(&crtc->mutex);
return;
}
 
6783,6 → 6669,8
/* Switch crtc and encoder back off if necessary */
if (old->dpms_mode != DRM_MODE_DPMS_ON)
connector->funcs->dpms(connector, old->dpms_mode);
 
mutex_unlock(&crtc->mutex);
}
 
/* Returns the clock of the currently programmed mode of the given pipe. */
6978,15 → 6866,8
 
void intel_mark_idle(struct drm_device *dev)
{
}
 
void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
 
ENTER();
 
if (!i915_powersave)
return;
 
6994,12 → 6875,11
if (!crtc->fb)
continue;
 
if (to_intel_framebuffer(crtc->fb)->obj == obj)
intel_increase_pllclock(crtc);
intel_decrease_pllclock(crtc);
}
}
 
void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
7012,7 → 6892,7
continue;
 
if (to_intel_framebuffer(crtc->fb)->obj == obj)
intel_decrease_pllclock(crtc);
intel_increase_pllclock(crtc);
}
}
 
7097,9 → 6977,7
 
obj = work->old_fb_obj;
 
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
wake_up(&dev_priv->pending_flip_queue);
wake_up_all(&dev_priv->pending_flip_queue);
 
queue_work(dev_priv->wq, &work->work);
 
7395,8 → 7273,8
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj;
struct drm_framebuffer *old_fb = crtc->fb;
struct drm_i915_gem_object *obj = to_intel_framebuffer(fb)->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags;
7421,8 → 7299,7
 
work->event = event;
work->crtc = crtc;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
work->old_fb_obj = to_intel_framebuffer(old_fb)->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
 
ret = drm_vblank_get(dev, intel_crtc->pipe);
7442,9 → 7319,6
intel_crtc->unpin_work = work;
spin_unlock_irqrestore(&dev->event_lock, flags);
 
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
 
7462,11 → 7336,8
 
work->enable_stall_check = true;
 
/* Block clients from rendering to the new back buffer until
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
atomic_inc(&intel_crtc->unpin_work_count);
intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
7482,7 → 7353,7
 
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
crtc->fb = old_fb;
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
7504,7 → 7375,6
static struct drm_crtc_helper_funcs intel_helper_funcs = {
.mode_set_base_atomic = intel_pipe_set_base_atomic,
.load_lut = intel_crtc_load_lut,
.disable = intel_crtc_noop,
};
 
bool intel_encoder_check_is_cloned(struct intel_encoder *encoder)
7894,17 → 7764,22
}
}
 
bool intel_set_mode(struct drm_crtc *crtc,
int intel_set_mode(struct drm_crtc *crtc,
struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_display_mode *adjusted_mode, *saved_mode, *saved_hwmode;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
bool ret = true;
int ret = 0;
 
saved_mode = kmalloc(2 * sizeof(*saved_mode), GFP_KERNEL);
if (!saved_mode)
return -ENOMEM;
saved_hwmode = saved_mode + 1;
 
intel_modeset_affected_pipes(crtc, &modeset_pipes,
&prepare_pipes, &disable_pipes);
 
7914,8 → 7789,8
for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
intel_crtc_disable(&intel_crtc->base);
 
saved_hwmode = crtc->hwmode;
saved_mode = crtc->mode;
*saved_hwmode = crtc->hwmode;
*saved_mode = crtc->mode;
 
/* Hack: Because we don't (yet) support global modeset on multiple
* crtcs, we don't keep track of the new mode for more than one crtc.
7926,7 → 7801,8
if (modeset_pipes) {
adjusted_mode = intel_modeset_adjusted_mode(crtc, mode);
if (IS_ERR(adjusted_mode)) {
return false;
ret = PTR_ERR(adjusted_mode);
goto out;
}
}
 
7952,10 → 7828,10
* on the DPLL.
*/
for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
ret = !intel_crtc_mode_set(&intel_crtc->base,
ret = intel_crtc_mode_set(&intel_crtc->base,
mode, adjusted_mode,
x, y, fb);
if (!ret)
if (ret)
goto done;
}
 
7977,16 → 7853,23
/* FIXME: add subpixel order */
done:
drm_mode_destroy(dev, adjusted_mode);
if (!ret && crtc->enabled) {
crtc->hwmode = saved_hwmode;
crtc->mode = saved_mode;
if (ret && crtc->enabled) {
crtc->hwmode = *saved_hwmode;
crtc->mode = *saved_mode;
} else {
intel_modeset_check_state(dev);
}
 
out:
kfree(saved_mode);
return ret;
}
 
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->fb);
}
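
/* Editor's note — illustrative sketch, not part of the driver or of this revision:
 * the hunk above converts intel_set_mode() from returning bool to the 0/-errno
 * convention. A minimal userspace model of that calling pattern, with hypothetical
 * names (demo_set_mode, demo_caller), using only libc:
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int demo_set_mode(int want_mem)
{
    void *saved = malloc(64);          /* stand-in for the saved_mode kmalloc above */
    if (!saved)
        return -ENOMEM;                /* negative errno says exactly what failed */
    if (!want_mem) {
        free(saved);
        return -EINVAL;                /* another failure path, same convention */
    }
    free(saved);
    return 0;                          /* 0 is the only success value */
}

static int demo_caller(void)
{
    int ret = demo_set_mode(1);
    if (ret) {                         /* any non-zero value is an error code */
        fprintf(stderr, "set_mode failed: %d\n", ret);
        return ret;                    /* propagate unchanged to the caller */
    }
    return 0;
}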
 
#undef for_each_intel_crtc_masked
 
static void intel_set_config_free(struct intel_set_config *config)
8099,7 → 7982,7
struct intel_encoder *encoder;
int count, ro;
 
/* The upper layers ensure that we either disabl a crtc or have a list
/* The upper layers ensure that we either disable a crtc or have a list
* of connectors. For paranoia, double-check this. */
WARN_ON(!set->fb && (set->num_connectors != 0));
WARN_ON(set->fb && (set->num_connectors == 0));
8201,15 → 8084,10
BUG_ON(!set->crtc);
BUG_ON(!set->crtc->helper_private);
 
if (!set->mode)
set->fb = NULL;
/* Enforce sane interface api - has been abused by the fb helper. */
BUG_ON(!set->mode && set->fb);
BUG_ON(set->fb && set->num_connectors == 0);
 
/* The fb helper likes to play gross jokes with ->mode_set_config.
* Unfortunately the crtc helper doesn't do much at all for this case,
* so we have to cope with this madness until the fb helper is fixed up. */
if (set->fb && set->num_connectors == 0)
return 0;
 
if (set->fb) {
DRM_DEBUG_KMS("[CRTC:%d] [FB:%d] #connectors=%d (x y) (%i %i)\n",
set->crtc->base.id, set->fb->base.id,
8252,11 → 8130,11
drm_mode_debug_printmodeline(set->mode);
}
 
if (!intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb)) {
DRM_ERROR("failed to set mode on [CRTC:%d]\n",
set->crtc->base.id);
ret = -EINVAL;
ret = intel_set_mode(set->crtc, set->mode,
set->x, set->y, set->fb);
if (ret) {
DRM_ERROR("failed to set mode on [CRTC:%d], err = %d\n",
set->crtc->base.id, ret);
goto fail;
}
} else if (config->fb_changed) {
8273,7 → 8151,7
 
/* Try to restore the config */
if (config->mode_changed &&
!intel_set_mode(save_set.crtc, save_set.mode,
intel_set_mode(save_set.crtc, save_set.mode,
save_set.x, save_set.y, save_set.fb))
DRM_ERROR("failed to restore config after modeset failure\n");
 
8293,7 → 8171,7
 
static void intel_cpu_pll_init(struct drm_device *dev)
{
if (IS_HASWELL(dev))
if (HAS_DDI(dev))
intel_ddi_pll_init(dev);
}
 
8426,11 → 8304,10
I915_WRITE(PFIT_CONTROL, 0);
}
 
if (!(IS_HASWELL(dev) &&
(I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
if (!(HAS_DDI(dev) && (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
intel_crt_init(dev);
 
if (IS_HASWELL(dev)) {
if (HAS_DDI(dev)) {
int found;
 
/* Haswell uses DDI functions to detect digital outputs */
8477,23 → 8354,18
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
 
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
if (I915_READ(DP_C) & DP_DETECTED)
intel_dp_init(dev, DP_C, PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
 
if (I915_READ(SDVOB) & PORT_DETECTED) {
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
if (!found)
intel_hdmi_init(dev, SDVOB, PORT_B);
if (!found && (I915_READ(DP_B) & DP_DETECTED))
intel_dp_init(dev, DP_B, PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
 
if (I915_READ(SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, SDVOC, PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
 
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
8635,6 → 8507,9
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
 
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
 
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
8641,8 → 8516,6
return ret;
}
 
drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
intel_fb->obj = obj;
return 0;
}
 
8649,7 → 8522,7
 
static const struct drm_mode_config_funcs intel_mode_funcs = {
.fb_create = NULL /*intel_user_framebuffer_create*/,
.output_poll_changed = NULL /*intel_fb_output_poll_changed*/,
.output_poll_changed = intel_fb_output_poll_changed,
};
 
/* Set up chip specific display functions */
8658,7 → 8531,7
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* We always want a DPMS function */
if (IS_HASWELL(dev)) {
if (HAS_DDI(dev)) {
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
8720,8 → 8593,9
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
} else
dev_priv->display.update_wm = NULL;
dev_priv->display.modeset_global_resources =
haswell_modeset_global_resources;
}
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
}
8825,6 → 8699,18
 
/* Acer Aspire 5734Z must invert backlight brightness */
{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
 
/* Acer/eMachines G725 */
{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
 
/* Acer/eMachines e725 */
{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
 
/* Acer/Packard Bell NCL20 */
{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
 
/* Acer Aspire 4736Z */
{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
};
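
/* Editor's note — illustrative sketch, not part of the driver or of this revision:
 * the entries added above extend a table that intel_init_quirks() walks, matching PCI
 * device and subsystem ids and running the hook of every entry that applies. A
 * standalone model with hypothetical names (demo_quirk, demo_apply_quirks); 0xffff is
 * assumed here as a "match anything" wildcard:
 */
#include <stdint.h>
#include <stdio.h>

struct demo_quirk {
    uint16_t device, subsystem_vendor, subsystem_device;
    void (*hook)(void);
};

static void demo_invert_brightness(void)
{
    printf("applying brightness inversion\n");
}

static const struct demo_quirk demo_quirks[] = {
    { 0x2a42, 0x1025, 0x0459, demo_invert_brightness },   /* Acer Aspire 5734Z */
    { 0x2a42, 0x1025, 0x0210, demo_invert_brightness },   /* Acer/eMachines G725 */
};

static void demo_apply_quirks(uint16_t dev, uint16_t sub_ven, uint16_t sub_dev)
{
    for (unsigned i = 0; i < sizeof(demo_quirks) / sizeof(demo_quirks[0]); i++) {
        const struct demo_quirk *q = &demo_quirks[i];
        if (q->device == dev &&
            (q->subsystem_vendor == 0xffff || q->subsystem_vendor == sub_ven) &&
            (q->subsystem_device == 0xffff || q->subsystem_device == sub_dev))
            q->hook();                                     /* run every quirk that matches */
    }
}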
 
static void intel_init_quirks(struct drm_device *dev)
8853,13 → 8739,8
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
u32 vga_reg;
u32 vga_reg = i915_vgacntrl_reg(dev);
 
if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
 
// vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
out8(SR01, VGA_SR_INDEX);
sr1 = in8(VGA_SR_DATA);
8873,10 → 8754,7
 
void intel_modeset_init_hw(struct drm_device *dev)
{
/* We attempt to init the necessary power wells early in the initialization
* time, so the subsystems that expect power to be enabled can work.
*/
intel_init_power_wells(dev);
intel_init_power_well(dev);
 
intel_prepare_ddi(dev);
 
8918,7 → 8796,7
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
 
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
8936,6 → 8814,9
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
 
/* Just in case the BIOS is doing something questionable. */
intel_disable_fbc(dev);
}
 
static void
9129,7 → 9010,7
struct intel_encoder *encoder;
struct intel_connector *connector;
 
if (IS_HASWELL(dev)) {
if (HAS_DDI(dev)) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
if (tmp & TRANS_DDI_FUNC_ENABLE) {
9170,7 → 9051,7
crtc->active ? "enabled" : "disabled");
}
 
if (IS_HASWELL(dev))
if (HAS_DDI(dev))
intel_ddi_setup_hw_pll_state(dev);
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
9221,9 → 9102,7
 
if (force_restore) {
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_set_mode(&crtc->base, &crtc->base.mode,
crtc->base.x, crtc->base.y, crtc->base.fb);
intel_crtc_restore_mode(dev_priv->pipe_to_crtc_mapping[pipe]);
}
 
// i915_redisable_vga(dev);
/drivers/video/drm/i915/intel_dp.c
148,15 → 148,6
return max_link_bw;
}
 
static int
intel_dp_link_clock(uint8_t link_bw)
{
if (link_bw == DP_LINK_BW_2_7)
return 270000;
else
return 162000;
}
 
/*
* The units on the numbers in the next two are... bizarre. Examples will
* make it clearer; this one parallels an example in the eDP spec.
191,7 → 182,8
struct drm_display_mode *mode,
bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_link_clock =
drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
 
330,6 → 322,49
}
}
 
static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = intel_dp->output_reg + 0x10;
uint32_t status;
bool done;
 
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
ch_ctl = DPA_AUX_CH_CTL;
break;
case PORT_B:
ch_ctl = PCH_DPB_AUX_CH_CTL;
break;
case PORT_C:
ch_ctl = PCH_DPC_AUX_CH_CTL;
break;
case PORT_D:
ch_ctl = PCH_DPD_AUX_CH_CTL;
break;
default:
BUG();
}
}
 
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
if (has_aux_irq)
done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
msecs_to_jiffies(10));
else
done = wait_for_atomic(C, 10) == 0;
if (!done)
DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
has_aux_irq);
#undef C
 
return status;
}
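
/* Editor's note — illustrative sketch, not part of the driver or of this revision:
 * intel_dp_aux_wait_done() above reduces to "wait until the BUSY bit clears, give up
 * after a deadline". A userspace model with hypothetical names (demo_status,
 * DEMO_BUSY, demo_wait_done), using only POSIX clock calls:
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define DEMO_BUSY (1u << 31)

static volatile uint32_t demo_status = DEMO_BUSY;   /* cleared elsewhere, e.g. by another thread */

static bool demo_wait_done(unsigned timeout_ms)
{
    struct timespec start, now;
    struct timespec step = { .tv_sec = 0, .tv_nsec = 100 * 1000 }; /* ~100 us, like the old udelay loop */

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        if (!(demo_status & DEMO_BUSY))
            return true;                            /* transfer finished */
        clock_gettime(CLOCK_MONOTONIC, &now);
        long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000L +
                          (now.tv_nsec - start.tv_nsec) / 1000000L;
        if (elapsed_ms >= (long)timeout_ms)
            return false;                           /* hardware never signalled done */
        nanosleep(&step, NULL);                     /* back off instead of spinning hot */
    }
}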
 
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
uint8_t *send, int send_bytes,
341,12 → 376,18
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
int i;
int recv_bytes;
int i, ret, recv_bytes;
uint32_t status;
uint32_t aux_clock_divider;
int try, precharge;
bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
 
/* dp aux is extremely sensitive to irq latency, hence request the
* lowest possible wakeup latency and so prevent the cpu from going into
* deep sleep states.
*/
// pm_qos_update_request(&dev_priv->pm_qos, 0);
 
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
379,7 → 420,7
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
if (IS_HASWELL(dev))
if (HAS_DDI(dev))
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
else if (IS_VALLEYVIEW(dev))
aux_clock_divider = 100;
399,7 → 440,7
 
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
status = I915_READ(ch_ctl);
status = I915_READ_NOTRACE(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
408,7 → 449,8
if (try == 3) {
WARN(1, "dp_aux_ch not started status 0x%08x\n",
I915_READ(ch_ctl));
return -EBUSY;
ret = -EBUSY;
goto out;
}
 
/* Must try at least 3 times according to DP spec */
421,6 → 463,7
/* Send the command and wait for it to complete */
I915_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
(has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
DP_AUX_CH_CTL_TIME_OUT_400us |
(send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
(precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
428,13 → 471,9
DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
for (;;) {
status = I915_READ(ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
udelay(100);
}
 
status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
 
/* Clear done status and any errors */
I915_WRITE(ch_ctl,
status |
451,7 → 490,8
 
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
return -EBUSY;
ret = -EBUSY;
goto out;
}
 
/* Check for timeout or receive error.
459,7 → 499,8
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
return -EIO;
ret = -EIO;
goto out;
}
 
/* Timeouts occur when the device isn't connected, so they're
466,7 → 507,8
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
return -ETIMEDOUT;
ret = -ETIMEDOUT;
goto out;
}
 
/* Unload any bytes sent back from the other side */
479,7 → 521,11
unpack_aux(I915_READ(ch_data + i),
recv + i, recv_bytes - i);
 
return recv_bytes;
ret = recv_bytes;
out:
// pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
 
return ret;
}
 
/* Write data to the aux channel in native mode */
718,16 → 764,35
return false;
 
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
 
if (intel_dp->color_range_auto) {
/*
* See:
* CEA-861-E - 5.1 Default Encoding Parameters
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
intel_dp->color_range = DP_COLOR_RANGE_16_235;
else
intel_dp->color_range = 0;
}
 
if (intel_dp->color_range)
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
 
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
 
for (clock = 0; clock <= max_clock; clock++) {
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
int link_bw_clock =
drm_dp_bw_code_to_link_rate(bws[clock]);
int link_avail = intel_dp_max_data_rate(link_bw_clock,
lane_count);
 
if (mode_rate <= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
adjusted_mode->clock = link_bw_clock;
DRM_DEBUG_KMS("DP link bw %02x lane "
"count %d clock %d bpp %d\n",
intel_dp->link_bw, intel_dp->lane_count,
742,39 → 807,6
return false;
}
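
/* Editor's note — illustrative sketch, not part of the driver or of this revision:
 * the fixup above walks link rates and lane counts and keeps the first combination
 * whose bandwidth covers the mode. A standalone model with hypothetical names
 * (demo_pick_link); the data-rate and capacity formulas mirror
 * intel_dp_link_required()/intel_dp_max_data_rate() as used here:
 */
#include <stdbool.h>

static int demo_data_rate(int pixel_clock_khz, int bpp)
{
    return pixel_clock_khz * bpp / 8;              /* payload the mode produces */
}

static int demo_link_capacity(int link_clock_khz, int lanes)
{
    return link_clock_khz * lanes * 8 / 10;        /* 8b/10b coding overhead on the wire */
}

static bool demo_pick_link(int pixel_clock_khz, int bpp, int *out_rate_khz, int *out_lanes)
{
    static const int rates[] = { 162000, 270000 }; /* RBR and HBR, as in bws[] */
    int mode_rate = demo_data_rate(pixel_clock_khz, bpp);

    for (unsigned r = 0; r < sizeof(rates) / sizeof(rates[0]); r++)
        for (int lanes = 1; lanes <= 4; lanes <<= 1)
            if (mode_rate <= demo_link_capacity(rates[r], lanes)) {
                *out_rate_khz = rates[r];
                *out_lanes = lanes;
                return true;                       /* cheapest configuration that fits */
            }
    return false;                                  /* mode needs more bandwidth than the sink offers */
}

/* e.g. demo_pick_link(148500, 24, &rate, &lanes) settles on 162000 kHz x 4 lanes for 1080p60 at 24 bpp */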
 
struct intel_dp_m_n {
uint32_t tu;
uint32_t gmch_m;
uint32_t gmch_n;
uint32_t link_m;
uint32_t link_n;
};
 
static void
intel_reduce_ratio(uint32_t *num, uint32_t *den)
{
while (*num > 0xffffff || *den > 0xffffff) {
*num >>= 1;
*den >>= 1;
}
}
 
static void
intel_dp_compute_m_n(int bpp,
int nlanes,
int pixel_clock,
int link_clock,
struct intel_dp_m_n *m_n)
{
m_n->tu = 64;
m_n->gmch_m = (pixel_clock * bpp) >> 3;
m_n->gmch_n = link_clock * nlanes;
intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
m_n->link_m = pixel_clock;
m_n->link_n = link_clock;
intel_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
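
/* Editor's note — illustrative sketch, not part of the driver or of this revision:
 * the helpers removed above (and intel_link_compute_m_n, which replaces them) build
 * two ratios that are programmed into 24-bit M/N register fields. A standalone model
 * with hypothetical names (demo_m_n, demo_compute_m_n), using the same halving-based
 * reduction:
 */
#include <stdint.h>

struct demo_m_n {
    uint32_t gmch_m, gmch_n;   /* data ratio: pixel payload vs. raw link throughput */
    uint32_t link_m, link_n;   /* clock ratio: pixel clock vs. link clock */
};

static void demo_reduce(uint32_t *num, uint32_t *den)
{
    while (*num > 0xffffff || *den > 0xffffff) {   /* both register fields are 24 bits wide */
        *num >>= 1;
        *den >>= 1;
    }
}

static void demo_compute_m_n(int bpp, int nlanes, int pixel_clock, int link_clock,
                             struct demo_m_n *m_n)
{
    m_n->gmch_m = (uint32_t)(pixel_clock * bpp) >> 3;  /* same numerator as the removed helper */
    m_n->gmch_n = (uint32_t)(link_clock * nlanes);     /* same denominator */
    demo_reduce(&m_n->gmch_m, &m_n->gmch_n);
    m_n->link_m = pixel_clock;
    m_n->link_n = link_clock;
    demo_reduce(&m_n->link_m, &m_n->link_n);
}

/* e.g. demo_compute_m_n(24, 4, 148500, 270000, &m_n) models the values that end up in
 * the PIPE_DATA_M1/N1 and PIPE_LINK_M1/N1 writes below for a 1080p mode. */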
 
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
785,9 → 817,10
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
struct intel_link_m_n m_n;
int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
int target_clock;
 
/*
* Find the lane count in the intel_encoder private
803,13 → 836,22
}
}
 
target_clock = mode->clock;
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
if (intel_encoder->type == INTEL_OUTPUT_EDP) {
target_clock = intel_edp_target_clock(intel_encoder,
mode);
break;
}
}
 
/*
* Compute the GMCH and Link ratios. The '3' here is
* the number of bytes_per_pixel post-LUT, which we always
* set up for 8-bits of R/G/B, or 3 bytes total.
*/
intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
intel_link_compute_m_n(intel_crtc->bpp, lane_count,
target_clock, adjusted_mode->clock, &m_n);
 
if (IS_HASWELL(dev)) {
I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
851,6 → 893,32
}
}
 
static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
dpa_ctl = I915_READ(DP_A);
dpa_ctl &= ~DP_PLL_FREQ_MASK;
 
if (clock < 200000) {
/* For a long time we've carried around an ILK-DevA w/a for the
* 160MHz clock. If we're really unlucky, it's still required.
*/
DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
dpa_ctl |= DP_PLL_FREQ_160MHZ;
} else {
dpa_ctl |= DP_PLL_FREQ_270MHZ;
}
 
I915_WRITE(DP_A, dpa_ctl);
 
POSTING_READ(DP_A);
udelay(500);
}
 
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
926,6 → 994,7
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
if (!HAS_PCH_SPLIT(dev))
intel_dp->DP |= intel_dp->color_range;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
950,6 → 1019,9
} else {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
}
 
if (is_cpu_edp(intel_dp))
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
}
 
#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1057,6 → 1129,8
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
 
if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_FORCE_VDD;
1543,7 → 1617,7
}
 
static uint32_t
intel_dp_signal_levels(uint8_t train_set)
intel_gen4_signal_levels(uint8_t train_set)
{
uint32_t signal_levels = 0;
 
1641,7 → 1715,7
 
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_dp_signal_levels_hsw(uint8_t train_set)
intel_hsw_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
1673,6 → 1747,34
}
}
 
/* Properly updates "DP" with the correct signal levels. */
static void
intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
{
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
uint32_t signal_levels, mask;
uint8_t train_set = intel_dp->train_set[0];
 
if (IS_HASWELL(dev)) {
signal_levels = intel_hsw_signal_levels(train_set);
mask = DDI_BUF_EMP_MASK;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(train_set);
mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
} else {
signal_levels = intel_gen4_signal_levels(train_set);
mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
}
 
DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
 
*DP = (*DP & ~mask) | signal_levels;
}
 
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
1696,6 → 1798,8
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
 
if (port != PORT_A) {
temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
I915_WRITE(DP_TP_CTL(port), temp);
 
1704,6 → 1808,8
DRM_ERROR("Timed out waiting for DP idle patterns\n");
 
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
}
 
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
break;
1791,7 → 1897,7
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
 
if (IS_HASWELL(dev))
if (HAS_DDI(dev))
intel_ddi_prepare_link_retrain(encoder);
 
/* Write the link configuration data */
1809,24 → 1915,8
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
 
if (IS_HASWELL(dev)) {
signal_levels = intel_dp_signal_levels_hsw(
intel_dp->train_set[0]);
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
signal_levels);
intel_dp_set_signal_levels(intel_dp, &DP);
 
/* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
1850,7 → 1940,7
for (i = 0; i < intel_dp->lane_count; i++)
if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
if (i == intel_dp->lane_count && voltage_tries == 5) {
if (i == intel_dp->lane_count) {
++loop_tries;
if (loop_tries == 5) {
DRM_DEBUG_KMS("too many full retries, give up\n");
1882,7 → 1972,6
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
1892,8 → 1981,6
cr_tries = 0;
channel_eq = false;
for (;;) {
/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
uint32_t signal_levels;
uint8_t link_status[DP_LINK_STATUS_SIZE];
 
if (cr_tries > 5) {
1902,19 → 1989,7
break;
}
 
if (IS_HASWELL(dev)) {
signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
intel_dp_set_signal_levels(intel_dp, &DP);
 
/* channel eq pattern */
if (!intel_dp_set_link_train(intel_dp, DP,
1964,6 → 2039,8
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc =
to_intel_crtc(intel_dig_port->base.base.crtc);
uint32_t DP = intel_dp->DP;
 
/*
1981,7 → 2058,7
* intel_ddi_prepare_link_retrain will take care of redoing the link
* train.
*/
if (IS_HASWELL(dev))
if (HAS_DDI(dev))
return;
 
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1998,7 → 2075,8
}
POSTING_READ(intel_dp->output_reg);
 
msleep(17);
/* We don't really know why we're doing this */
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
2018,19 → 2096,14
/* Changes to enable or select take place the vblank
* after being written.
*/
if (crtc == NULL) {
/* We can arrive here never having been attached
* to a CRTC, for instance, due to inheriting
* random state from the BIOS.
*
* If the pipe is not running, play safe and
* wait for the clocks to stabilise before
* continuing.
*/
if (WARN_ON(crtc == NULL)) {
/* We should never try to disable a port without a crtc
* attached. For paranoia keep the code around for a
* bit. */
POSTING_READ(intel_dp->output_reg);
msleep(50);
} else
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
DP &= ~DP_AUDIO_OUTPUT_ENABLE;
2042,10 → 2115,16
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
sizeof(intel_dp->dpcd)) == 0)
return false; /* aux transfer failed */
 
hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
if (intel_dp->dpcd[DP_DPCD_REV] == 0)
return false; /* DPCD not present */
 
2206,6 → 2285,8
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
enum drm_connector_status status;
 
/* Can't disconnect eDP, but you can close the lid... */
2216,6 → 2297,9
return status;
}
 
if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
return connector_status_disconnected;
 
return intel_dp_detect_dpcd(intel_dp);
}
 
2224,17 → 2308,18
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit;
 
switch (intel_dp->output_reg) {
case DP_B:
bit = DPB_HOTPLUG_LIVE_STATUS;
switch (intel_dig_port->port) {
case PORT_B:
bit = PORTB_HOTPLUG_LIVE_STATUS;
break;
case DP_C:
bit = DPC_HOTPLUG_LIVE_STATUS;
case PORT_C:
bit = PORTC_HOTPLUG_LIVE_STATUS;
break;
case DP_D:
bit = DPD_HOTPLUG_LIVE_STATUS;
case PORT_D:
bit = PORTD_HOTPLUG_LIVE_STATUS;
break;
default:
return connector_status_unknown;
2290,13 → 2375,6
return intel_ddc_get_modes(connector, adapter);
}
 
 
/**
* Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
*
* \return true if DP port is connected.
* \return false if DP port is disconnected.
*/
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
2306,7 → 2384,6
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
intel_dp->has_audio = false;
 
2315,10 → 2392,6
else
status = g4x_dp_detect(intel_dp);
 
// hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
// 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
// DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
if (status != connector_status_connected)
return status;
 
2396,7 → 2469,7
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
#if 0
 
if (property == dev_priv->force_audio_property) {
int i = val;
bool has_audio;
2419,13 → 2492,23
}
 
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_dp->color_range)
return 0;
 
intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_dp->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_dp->color_range_auto = false;
intel_dp->color_range = 0;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_dp->color_range_auto = false;
intel_dp->color_range = DP_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
goto done;
}
#endif
 
if (is_edp(intel_dp) &&
property == connector->dev->mode_config.scaling_mode_property) {
2446,11 → 2529,8
return -EINVAL;
 
done:
if (intel_encoder->base.crtc) {
struct drm_crtc *crtc = intel_encoder->base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
if (intel_encoder->base.crtc)
intel_crtc_restore_mode(intel_encoder->base.crtc);
 
return 0;
}
2479,12 → 2559,15
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
mutex_unlock(&dev->mode_config.mutex);
}
kfree(intel_dig_port);
}
2492,7 → 2575,6
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
.mode_fixup = intel_dp_mode_fixup,
.mode_set = intel_dp_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_dp_connector_funcs = {
2567,6 → 2649,7
 
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_dp->color_range_auto = true;
 
if (is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);
2756,7 → 2839,7
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
 
if (IS_HASWELL(dev))
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
2768,15 → 2851,15
name = "DPDDC-A";
break;
case PORT_B:
dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS;
dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
name = "DPDDC-B";
break;
case PORT_C:
dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS;
dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
name = "DPDDC-C";
break;
case PORT_D:
dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS;
dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
name = "DPDDC-D";
break;
default:
/drivers/video/drm/i915/intel_drv.h
118,6 → 118,11
* timings in the mode to prevent the crtc fixup from overwriting them.
* Currently only lvds needs that. */
#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
/*
* Set when limited 16-235 (as opposed to full 0-255) RGB color range is
* to be used.
*/
#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
 
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
162,6 → 167,7
bool cloneable;
bool connectors_active;
void (*hot_plug)(struct intel_encoder *);
void (*pre_pll_enable)(struct intel_encoder *);
void (*pre_enable)(struct intel_encoder *);
void (*enable)(struct intel_encoder *);
void (*disable)(struct intel_encoder *);
214,6 → 220,7
* some outputs connected to this crtc.
*/
bool active;
bool eld_vld;
bool primary_disabled; /* is the crtc obscured by a plane? */
bool lowfreq_avail;
struct intel_overlay *overlay;
237,6 → 244,9
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
 
/* reset counter value when the last flip was submitted */
unsigned int reset_counter;
};
 
struct intel_plane {
292,6 → 302,9
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
 
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
346,9 → 359,11
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
void (*set_infoframes)(struct drm_encoder *encoder,
365,6 → 380,7
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
bool color_range_auto;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
386,6 → 402,7
struct intel_digital_port {
struct intel_encoder base;
enum port port;
u32 port_reversal;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
448,10 → 465,10
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern bool intel_lvds_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
511,12 → 528,12
bool mode_changed;
};
 
extern bool intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
extern int intel_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y, struct drm_framebuffer *old_fb);
extern void intel_modeset_disable(struct drm_device *dev);
extern void intel_crtc_restore_mode(struct drm_crtc *crtc);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_crtc_update_dpms(struct drm_crtc *crtc);
extern void intel_encoder_noop(struct drm_encoder *encoder);
extern void intel_encoder_destroy(struct drm_encoder *encoder);
extern void intel_encoder_dpms(struct intel_encoder *encoder, int mode);
extern bool intel_encoder_check_is_cloned(struct intel_encoder *encoder);
555,6 → 572,9
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
 
bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
struct intel_digital_port *port);
 
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
598,6 → 618,7
struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_initial_config(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
extern void intel_fbdev_set_suspend(struct drm_device *dev, int state);
extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
636,7 → 657,8
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
 
extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
extern unsigned long intel_gen4_compute_page_offset(int *x, int *y,
unsigned int tiling_mode,
unsigned int bpp,
unsigned int pitch);
 
657,7 → 679,8
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
 
extern void intel_init_power_wells(struct drm_device *dev);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
/drivers/video/drm/i915/intel_dvo.c
345,7 → 345,6
static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
.mode_fixup = intel_dvo_mode_fixup,
.mode_set = intel_dvo_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_dvo_connector_funcs = {
/drivers/video/drm/i915/intel_fb.c
91,9 → 91,10
// .fb_debug_leave = drm_fb_helper_debug_leave,
};
 
static int intelfb_create(struct intel_fbdev *ifbdev,
static int intelfb_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
struct drm_device *dev = ifbdev->helper.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
186,8 → 187,7
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
 
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
223,26 → 223,10
return ret;
}
 
static int intel_fb_find_or_create_single(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct intel_fbdev *ifbdev = (struct intel_fbdev *)helper;
int new_fb = 0;
int ret;
 
if (!helper->fb) {
ret = intelfb_create(ifbdev, sizes);
if (ret)
return ret;
new_fb = 1;
}
return new_fb;
}
 
static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
.gamma_set = intel_crtc_fb_gamma_set,
.gamma_get = intel_crtc_fb_gamma_get,
.fb_probe = intel_fb_find_or_create_single,
.fb_probe = intelfb_create,
};
 
 
268,9 → 252,20
}
 
drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
drm_fb_helper_initial_config(&ifbdev->helper, 32);
 
return 0;
}
 
void intel_fbdev_initial_config(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
 
/* Due to peculiar init order wrt to hpd handling this is separate. */
drm_fb_helper_initial_config(&dev_priv->fbdev->helper, 32);
}
 
void intel_fb_output_poll_changed(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
}
/drivers/video/drm/i915/intel_hdmi.c
48,7 → 48,7
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
 
enabled_bits = IS_HASWELL(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
 
WARN(I915_READ(intel_hdmi->sdvox_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
331,6 → 331,7
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
340,8 → 341,15
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
 
avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
if (intel_hdmi->rgb_quant_range_selectable) {
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
}
 
avi_if.body.avi.VIC = drm_match_cea_mode(adjusted_mode);
 
intel_set_infoframe(encoder, &avi_if);
}
 
364,7 → 372,8
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port;
391,11 → 400,11
return;
}
 
switch (intel_hdmi->sdvox_reg) {
case SDVOB:
switch (intel_dig_port->port) {
case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
case SDVOC:
case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
default:
428,7 → 437,8
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port;
447,14 → 457,14
return;
}
 
switch (intel_hdmi->sdvox_reg) {
case HDMIB:
switch (intel_dig_port->port) {
case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
case HDMIC:
case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
case HDMID:
case PORT_D:
port = VIDEO_DIP_PORT_D;
break;
default:
766,46 → 776,38
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_hdmi->has_hdmi_sink &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
else
intel_hdmi->color_range = 0;
}
 
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
if (intel_hdmi->color_range)
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
 
switch (intel_hdmi->sdvox_reg) {
case SDVOB:
bit = HDMIB_HOTPLUG_LIVE_STATUS;
break;
case SDVOC:
bit = HDMIC_HOTPLUG_LIVE_STATUS;
break;
default:
bit = 0;
break;
return true;
}
 
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
 
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct drm_i915_private *dev_priv = dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
 
if (IS_G4X(connector->dev) && !g4x_hdmi_connected(intel_hdmi))
return status;
 
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
817,6 → 819,8
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
}
kfree(edid);
}
879,7 → 883,7
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
#if 0
 
if (property == dev_priv->force_audio_property) {
enum hdmi_force_audio i = val;
bool has_audio;
900,13 → 904,23
intel_hdmi->has_audio = has_audio;
goto done;
}
#endif
 
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_hdmi->color_range)
return 0;
 
intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_hdmi->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_hdmi->color_range_auto = false;
intel_hdmi->color_range = 0;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_hdmi->color_range_auto = false;
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
goto done;
}
 
913,11 → 927,8
return -EINVAL;
 
done:
if (intel_dig_port->base.base.crtc) {
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
if (intel_dig_port->base.base.crtc)
intel_crtc_restore_mode(intel_dig_port->base.base.crtc);
 
return 0;
}
932,7 → 943,6
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
958,6 → 968,7
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_hdmi->color_range_auto = true;
}
 
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
981,15 → 992,15
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
dev_priv->hotplug_supported_mask |= PORTB_HOTPLUG_INT_STATUS;
break;
case PORT_C:
intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
dev_priv->hotplug_supported_mask |= PORTC_HOTPLUG_INT_STATUS;
break;
case PORT_D:
intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
dev_priv->hotplug_supported_mask |= PORTD_HOTPLUG_INT_STATUS;
break;
case PORT_A:
/* Internal port only for eDP. */
1014,7 → 1025,7
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
 
if (IS_HASWELL(dev))
if (HAS_DDI(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
/drivers/video/drm/i915/intel_i2c.c
63,6 → 63,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS0, 0);
I915_WRITE(dev_priv->gpio_mmio_base + GMBUS4, 0);
}
 
static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
202,7 → 203,78
algo->data = bus;
}
 
/*
* gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
* mode. This results in spurious interrupt warnings if the legacy irq no. is
* shared with another device. The kernel then disables that interrupt source
* and so prevents the other device from working properly.
*/
#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
u32 gmbus4_irq_en)
{
int i;
int reg_offset = dev_priv->gpio_mmio_base;
u32 gmbus2 = 0;
DEFINE_WAIT(wait);
 
if (!HAS_GMBUS_IRQ(dev_priv->dev))
gmbus4_irq_en = 0;
 
/* Important: The hw handles only the first bit, so set only one! Since
* we also need to check for NAKs besides the hw ready/idle signal, we
* need to wake up periodically and check that ourselves. */
I915_WRITE(GMBUS4 + reg_offset, gmbus4_irq_en);
 
for (i = 0; i < msecs_to_jiffies(50) + 1; i++) {
prepare_to_wait(&dev_priv->gmbus_wait_queue, &wait,
TASK_UNINTERRUPTIBLE);
 
gmbus2 = I915_READ_NOTRACE(GMBUS2 + reg_offset);
if (gmbus2 & (GMBUS_SATOER | gmbus2_status))
break;
 
schedule_timeout(1);
}
finish_wait(&dev_priv->gmbus_wait_queue, &wait);
 
I915_WRITE(GMBUS4 + reg_offset, 0);
 
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
if (gmbus2 & gmbus2_status)
return 0;
return -ETIMEDOUT;
}
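
/* Editor's note — illustrative sketch, not part of the driver or of this revision:
 * gmbus_wait_hw_status() above sleeps in short slices and distinguishes three
 * outcomes: the wanted status bit, a NAK, or a timeout. A userspace model with
 * hypothetical names (DEMO_NAK, demo_gmbus2, demo_gmbus_wait):
 */
#include <errno.h>
#include <stdint.h>
#include <time.h>

#define DEMO_NAK (1u << 10)                        /* stand-in for GMBUS_SATOER */

static volatile uint32_t demo_gmbus2;              /* updated elsewhere, e.g. by another thread */

static int demo_gmbus_wait(uint32_t wanted, unsigned timeout_ms)
{
    struct timespec slice = { .tv_sec = 0, .tv_nsec = 1000 * 1000 };  /* ~1 ms, like schedule_timeout(1) */
    uint32_t status = 0;

    for (unsigned elapsed = 0; elapsed < timeout_ms; elapsed++) {
        status = demo_gmbus2;
        if (status & (DEMO_NAK | wanted))
            break;                                 /* either outcome ends the wait early */
        nanosleep(&slice, NULL);
    }

    if (status & DEMO_NAK)
        return -ENXIO;                             /* slave NAKed: "no such device" */
    if (status & wanted)
        return 0;                                  /* hardware reached the requested state */
    return -ETIMEDOUT;                             /* budget exhausted with no answer */
}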
 
static int
gmbus_wait_idle(struct drm_i915_private *dev_priv)
{
int ret;
int reg_offset = dev_priv->gpio_mmio_base;
 
#define C ((I915_READ_NOTRACE(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0)
 
if (!HAS_GMBUS_IRQ(dev_priv->dev))
return wait_for(C, 10);
 
/* Important: The hw handles only the first bit, so set only one! */
I915_WRITE(GMBUS4 + reg_offset, GMBUS_IDLE_EN);
 
ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 10);
 
I915_WRITE(GMBUS4 + reg_offset, 0);
 
if (ret)
return 0;
else
return -ETIMEDOUT;
#undef C
}
 
static int
gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
u32 gmbus1_index)
{
219,15 → 291,11
while (len) {
int ret;
u32 val, loop = 0;
u32 gmbus2;
 
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_RDY),
50);
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
GMBUS_HW_RDY_EN);
if (ret)
return -ETIMEDOUT;
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
return ret;
 
val = I915_READ(GMBUS3 + reg_offset);
do {
261,7 → 329,6
GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
u32 gmbus2;
 
val = loop = 0;
do {
270,13 → 337,10
 
I915_WRITE(GMBUS3 + reg_offset, val);
 
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_RDY),
50);
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_RDY,
GMBUS_HW_RDY_EN);
if (ret)
return -ETIMEDOUT;
if (gmbus2 & GMBUS_SATOER)
return -ENXIO;
return ret;
}
return 0;
}
345,8 → 409,6
I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
for (i = 0; i < num; i++) {
u32 gmbus2;
 
if (gmbus_is_index_read(msgs, i, num)) {
ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
i += 1; /* set i to the index of the read xfer */
361,13 → 423,12
if (ret == -ENXIO)
goto clear_err;
 
ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
(GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
50);
ret = gmbus_wait_hw_status(dev_priv, GMBUS_HW_WAIT_PHASE,
GMBUS_HW_WAIT_EN);
if (ret == -ENXIO)
goto clear_err;
if (ret)
goto timeout;
if (gmbus2 & GMBUS_SATOER)
goto clear_err;
}
 
/* Generate a STOP condition on the bus. Note that gmbus can't generate
380,8 → 441,7
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
10)) {
if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
adapter->name);
ret = -ETIMEDOUT;
405,8 → 465,7
* it's slow responding and only answers on the 2nd retry.
*/
ret = -ENXIO;
if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
10)) {
if (gmbus_wait_idle(dev_priv)) {
DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
adapter->name);
ret = -ETIMEDOUT;
465,10 → 524,13
 
if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else if (IS_VALLEYVIEW(dev))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else
dev_priv->gpio_mmio_base = 0;
 
mutex_init(&dev_priv->gmbus_mutex);
init_waitqueue_head(&dev_priv->gmbus_wait_queue);
 
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
/drivers/video/drm/i915/intel_lvds.c
51,7 → 51,8
 
u32 pfit_control;
u32 pfit_pgm_ratios;
bool pfit_dirty;
bool is_dual_link;
u32 reg;
 
struct intel_lvds_connector *attached_connector;
};
71,16 → 72,11
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 lvds_reg, tmp;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
u32 tmp;
 
if (HAS_PCH_SPLIT(dev)) {
lvds_reg = PCH_LVDS;
} else {
lvds_reg = LVDS;
}
tmp = I915_READ(lvds_encoder->reg);
 
tmp = I915_READ(lvds_reg);
 
if (!(tmp & LVDS_PORT_EN))
return false;
 
92,6 → 88,91
return true;
}
 
/* The LVDS pin pair needs to be on before the DPLLs are enabled.
* This is an exception to the general rule that mode_set doesn't turn
* things on.
*/
static void intel_pre_pll_enable_lvds(struct intel_encoder *encoder)
{
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_display_mode *fixed_mode =
lvds_encoder->attached_connector->base.panel.fixed_mode;
int pipe = intel_crtc->pipe;
u32 temp;
 
temp = I915_READ(lvds_encoder->reg);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
 
if (HAS_PCH_CPT(dev)) {
temp &= ~PORT_TRANS_SEL_MASK;
temp |= PORT_TRANS_SEL_CPT(pipe);
} else {
if (pipe == 1) {
temp |= LVDS_PIPEB_SELECT;
} else {
temp &= ~LVDS_PIPEB_SELECT;
}
}
 
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether we're going to
* set the DPLLs for dual-channel mode or not.
*/
if (lvds_encoder->is_dual_link)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
 
/* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
* appropriately here, but we need to look more thoroughly into how
* panels behave in the two modes.
*/
 
/* Set the dithering flag on LVDS as needed, note that there is no
* special lvds dither control bit on pch-split platforms, dithering is
* only controlled through the PIPECONF reg. */
if (INTEL_INFO(dev)->gen == 4) {
if (dev_priv->lvds_dither)
temp |= LVDS_ENABLE_DITHER;
else
temp &= ~LVDS_ENABLE_DITHER;
}
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
 
I915_WRITE(lvds_encoder->reg, temp);
}
 
static void intel_pre_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *enc = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (HAS_PCH_SPLIT(dev) || !enc->pfit_control)
return;
 
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
* adjusted whilst the pipe is disabled, according to
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
enc->pfit_control,
enc->pfit_pgm_ratios);
 
I915_WRITE(PFIT_PGM_RATIOS, enc->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, enc->pfit_control);
}
 
/**
* Sets the power state for the panel.
*/
101,38 → 182,20
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
u32 ctl_reg, stat_reg;
 
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
lvds_reg = LVDS;
stat_reg = PP_STATUS;
}
 
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
 
if (lvds_encoder->pfit_dirty) {
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
* adjusted whilst the pipe is disabled, according to
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
lvds_encoder->pfit_control,
lvds_encoder->pfit_pgm_ratios);
 
I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
lvds_encoder->pfit_dirty = false;
}
 
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
POSTING_READ(lvds_reg);
POSTING_READ(lvds_encoder->reg);
if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000))
DRM_ERROR("timed out waiting for panel to power on\n");
 
144,15 → 207,13
struct drm_device *dev = encoder->base.dev;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
u32 ctl_reg, stat_reg;
 
if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
lvds_reg = PCH_LVDS;
stat_reg = PCH_PP_STATUS;
} else {
ctl_reg = PP_CONTROL;
lvds_reg = LVDS;
stat_reg = PP_STATUS;
}
 
162,15 → 223,10
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
 
if (lvds_encoder->pfit_control) {
I915_WRITE(PFIT_CONTROL, 0);
lvds_encoder->pfit_dirty = true;
I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_encoder->reg);
}
 
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
}
 
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
406,7 → 462,6
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
lvds_encoder->pfit_control = pfit_control;
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
 
493,13 → 548,14
 
#if 0
/*
* Lid events. Note the use of 'modeset_on_lid':
* - we set it on lid close, and reset it on open
* Lid events. Note the use of 'modeset':
* - we set it to MODESET_ON_LID_OPEN on lid close,
* and set it to MODESET_DONE on open
* - we use it as a "only once" bit (ie we ignore
* duplicate events where it was already properly
* set/reset)
* - the suspend/resume paths will also set it to
* zero, since they restore the mode ("lid open").
* duplicate events where it was already properly set)
* - the suspend/resume paths will set it to
* MODESET_SUSPENDED and ignore the lid open event,
* because they restore the mode ("lid open").
*/
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
513,6 → 569,9
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
 
mutex_lock(&dev_priv->modeset_restore_lock);
if (dev_priv->modeset_restore == MODESET_SUSPENDED)
goto exit;
/*
* check and update the status of LVDS connector after receiving
* the LID notification event.
521,21 → 580,24
 
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
return NOTIFY_OK;
goto exit;
if (!acpi_lid_open()) {
dev_priv->modeset_on_lid = 1;
return NOTIFY_OK;
/* do modeset on next lid open event */
dev_priv->modeset_restore = MODESET_ON_LID_OPEN;
goto exit;
}
 
if (!dev_priv->modeset_on_lid)
return NOTIFY_OK;
if (dev_priv->modeset_restore == MODESET_DONE)
goto exit;
 
dev_priv->modeset_on_lid = 0;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, true);
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
 
dev_priv->modeset_restore = MODESET_DONE;
 
exit:
mutex_unlock(&dev_priv->modeset_restore_lock);
return NOTIFY_OK;
}
#endif
591,8 → 653,7
* If the CRTC is enabled, the display will be changed
* according to the new panel fitting mode.
*/
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
intel_crtc_restore_mode(crtc);
}
}
 
602,7 → 663,6
static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
.mode_fixup = intel_lvds_mode_fixup,
.mode_set = intel_lvds_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
895,6 → 955,53
return false;
}
 
static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
{
DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
return 1;
}
bool intel_is_dual_link_lvds(struct drm_device *dev)
{
struct intel_encoder *encoder;
struct intel_lvds_encoder *lvds_encoder;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->type == INTEL_OUTPUT_LVDS) {
lvds_encoder = to_lvds_encoder(&encoder->base);
 
return lvds_encoder->is_dual_link;
}
}
 
return false;
}
 
static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
{
struct drm_device *dev = lvds_encoder->base.base.dev;
unsigned int val;
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* use the module option value if specified */
if (i915_lvds_channel_mode > 0)
return i915_lvds_channel_mode == 2;
 
// if (dmi_check_system(intel_dual_link_lvds))
// return true;
 
/* BIOS should set the proper LVDS register value at boot, but
* in reality, it doesn't set the value when the lid is closed;
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
val = I915_READ(lvds_encoder->reg);
if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
val = dev_priv->bios_lvds_val;
 
return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
}
 
static bool intel_lvds_supported(struct drm_device *dev)
{
/* With the introduction of the PCH we gained a dedicated
980,6 → 1087,8
DRM_MODE_ENCODER_LVDS);
 
intel_encoder->enable = intel_enable_lvds;
intel_encoder->pre_enable = intel_pre_enable_lvds;
intel_encoder->pre_pll_enable = intel_pre_pll_enable_lvds;
intel_encoder->disable = intel_disable_lvds;
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
1001,6 → 1110,12
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
 
if (HAS_PCH_SPLIT(dev)) {
lvds_encoder->reg = PCH_LVDS;
} else {
lvds_encoder->reg = LVDS;
}
 
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
drm_object_attach_property(&connector->base,
1101,6 → 1216,10
goto failed;
 
out:
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
 
/*
* Unlock registers and just
* leave them unlocked
/drivers/video/drm/i915/intel_modes.c
28,7 → 28,6
#include <linux/fb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include "i915_drv.h"
 
85,7 → 84,7
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
#if 0
 
prop = dev_priv->force_audio_property;
if (prop == NULL) {
prop = drm_property_create_enum(dev, 0,
98,12 → 97,12
dev_priv->force_audio_property = prop;
}
drm_object_attach_property(&connector->base, prop, 0);
#endif
}
 
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
{ 0, "Full" },
{ 1, "Limited 16:235" },
{ INTEL_BROADCAST_RGB_AUTO, "Automatic" },
{ INTEL_BROADCAST_RGB_FULL, "Full" },
{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};
 
void
112,7 → 111,7
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_property *prop;
#if 0
 
prop = dev_priv->broadcast_rgb_property;
if (prop == NULL) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM,
126,5 → 125,4
}
 
drm_object_attach_property(&connector->base, prop, 0);
#endif
}
/drivers/video/drm/i915/intel_pm.c
470,12 → 470,6
dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
goto out_disable;
}
if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
(crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
509,6 → 503,14
if (in_dbg_master())
goto out_disable;
 
if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
DRM_INFO("not enough stolen space for compressed buffer (need %zd bytes), disabling\n", intel_fb->obj->base.size);
DRM_INFO("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
 
/* If the scanout has not changed, don't modify the FBC settings.
* Note that we make the fundamental assumption that the fb->obj
* cannot be unpinned (and have its GTT offset and fence revoked)
556,6 → 558,7
DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
intel_disable_fbc(dev);
}
i915_gem_stolen_cleanup_compression(dev);
}
 
static void i915_pineview_get_mem_freq(struct drm_device *dev)
2309,7 → 2312,6
i915_gem_object_unpin(ctx);
err_unref:
drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
 
2595,7 → 2597,7
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
 
/* Check if we are enabling RC6 */
3465,6 → 3467,7
ironlake_disable_rc6(dev);
} else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
gen6_disable_rps(dev);
mutex_unlock(&dev_priv->rps.hw_lock);
}
}
 
3590,6 → 3593,19
}
}
 
static void gen6_check_mch_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
 
tmp = I915_READ(MCH_SSKPD);
if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
DRM_INFO("This can cause pipe underruns and display issues.\n");
DRM_INFO("Please upgrade your BIOS to fix this.\n");
}
}
 
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
3682,6 → 3698,8
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
 
cpt_init_clock_gating(dev);
 
gen6_check_mch_setup(dev);
}
 
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3693,6 → 3711,10
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
 
/* WaVSRefCountFullforceMissDisable */
if (IS_HASWELL(dev_priv->dev))
reg &= ~GEN7_FF_VS_REF_CNT_FFME;
 
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
 
3863,6 → 3885,8
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
cpt_init_clock_gating(dev);
 
gen6_check_mch_setup(dev);
}
 
static void valleyview_init_clock_gating(struct drm_device *dev)
4056,35 → 4080,57
dev_priv->display.init_clock_gating(dev);
}
 
/* Starting with Haswell, we have different power wells for
* different parts of the GPU. This attempts to enable them all.
*/
void intel_init_power_wells(struct drm_device *dev)
void intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long power_wells[] = {
HSW_PWR_WELL_CTL1,
HSW_PWR_WELL_CTL2,
HSW_PWR_WELL_CTL4
};
int i;
bool is_enabled, enable_requested;
uint32_t tmp;
 
if (!IS_HASWELL(dev))
return;
 
mutex_lock(&dev->struct_mutex);
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE;
enable_requested = tmp & HSW_PWR_WELL_ENABLE;
 
for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
int well = I915_READ(power_wells[i]);
if (enable) {
if (!enable_requested)
I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
 
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Timeout enabling power well\n");
}
} else {
if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
}
}
}
 
mutex_unlock(&dev->struct_mutex);
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
void intel_init_power_well(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!IS_HASWELL(dev))
return;
 
/* For now, we need the power well to be always enabled. */
intel_set_power_well(dev, true);
 
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
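 
/* Illustrative sketch, not part of this revision: a hypothetical caller of
 * intel_set_power_well() defined above. It only shows the request/release
 * pairing around an access to registers that live inside the power well;
 * the function name and the access itself are placeholders.
 */
static void example_power_well_access(struct drm_device *dev)
{
	/* request the well; it stays powered while any of the request
	 * registers (BIOS, driver, KVMR, debug) still asks for it */
	intel_set_power_well(dev, true);

	/* ... touch display registers that need the power well here ... */

	/* drop our request; the well powers down only once no register
	 * is requesting it anymore */
	intel_set_power_well(dev, false);
}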
 
/* Set up chip specific power management-related functions */
/drivers/video/drm/i915/intel_ringbuffer.c
320,6 → 320,7
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
333,7 → 334,7
 
intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
intel_ring_emit(ring, flags);
intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, scratch_addr);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
465,6 → 466,9
if (pc->cpu_page == NULL)
goto err_unpin;
 
DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
ring->name, pc->gtt_offset);
 
pc->obj = obj;
ring->private = pc;
return 0;
556,6 → 560,8
 
static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
struct drm_device *dev = ring->dev;
 
if (!ring->private)
return;
 
605,6 → 611,13
return 0;
}
 
static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
u32 seqno)
{
struct drm_i915_private *dev_priv = dev->dev_private;
return dev_priv->last_seqno < seqno;
}
 
/**
* intel_ring_sync - sync the waiter to the signaller on seqno
*
635,11 → 648,20
if (ret)
return ret;
 
/* If seqno wrap happened, omit the wait with no-ops */
if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
intel_ring_emit(waiter,
dw1 | signaller->semaphore_register[waiter->id]);
dw1 |
signaller->semaphore_register[waiter->id]);
intel_ring_emit(waiter, seqno);
intel_ring_emit(waiter, 0);
intel_ring_emit(waiter, MI_NOOP);
} else {
intel_ring_emit(waiter, MI_NOOP);
intel_ring_emit(waiter, MI_NOOP);
intel_ring_emit(waiter, MI_NOOP);
intel_ring_emit(waiter, MI_NOOP);
}
intel_ring_advance(waiter);
 
return 0;
720,6 → 742,12
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
 
static void
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}
 
static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
727,6 → 755,13
return pc->cpu_page[0];
}
 
static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct pipe_control *pc = ring->private;
pc->cpu_page[0] = seqno;
}
 
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
1156,6 → 1191,10
return ret;
}
 
obj = NULL;
if (!HAS_LLC(dev))
obj = i915_gem_object_create_stolen(dev, ring->size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, ring->size);
if (obj == NULL) {
DRM_ERROR("Failed to allocate ringbuffer\n");
1174,7 → 1213,7
goto err_unpin;
 
ring->virtual_start =
ioremap(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
1197,7 → 1236,7
return 0;
 
err_unmap:
FreeKernelSpace(ring->virtual_start);
iounmap(ring->virtual_start);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
1225,7 → 1264,7
 
I915_WRITE_CTL(ring, 0);
 
// drm_core_ioremapfree(&ring->map, ring->dev);
iounmap(ring->virtual_start);
 
i915_gem_object_unpin(ring->obj);
drm_gem_object_unreference(&ring->obj->base);
1334,7 → 1373,8
 
msleep(1);
 
ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
} while (!time_after(GetTimerTicks(), end));
1396,14 → 1436,35
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}
 
static int __intel_ring_begin(struct intel_ring_buffer *ring,
int bytes)
{
int ret;
 
if (unlikely(ring->tail + bytes > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
}
 
if (unlikely(ring->space < bytes)) {
ret = ring_wait_for_space(ring, bytes);
if (unlikely(ret))
return ret;
}
 
ring->space -= bytes;
return 0;
}
 
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int n = 4*num_dwords;
int ret;
 
ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
 
1412,20 → 1473,21
if (ret)
return ret;
 
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
return ret;
return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
}
 
if (unlikely(ring->space < n)) {
ret = ring_wait_for_space(ring, n);
if (unlikely(ret))
return ret;
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
BUG_ON(ring->outstanding_lazy_request);
 
if (INTEL_INFO(ring->dev)->gen >= 6) {
I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
}
 
ring->space -= n;
return 0;
ring->set_seqno(ring, seqno);
}
 
void intel_ring_advance(struct intel_ring_buffer *ring)
1433,7 → 1495,7
struct drm_i915_private *dev_priv = ring->dev->dev_private;
 
ring->tail &= ring->size - 1;
if (dev_priv->stop_rings & intel_ring_flag(ring))
if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}
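 
/* Illustrative sketch, not part of this revision: the usual emit pattern
 * built on the helpers above - reserve dwords with intel_ring_begin(),
 * fill them with intel_ring_emit(), then publish the new tail with
 * intel_ring_advance(). The MI_NOOP payload is only a placeholder.
 */
static int example_emit_two_noops(struct intel_ring_buffer *ring)
{
	int ret;

	ret = intel_ring_begin(ring, 2);	/* reserve two dwords */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);		/* kick the updated tail */

	return 0;
}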
1590,6 → 1652,7
ring->irq_put = gen6_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->sync_to = gen6_ring_sync;
ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1600,6 → 1663,7
ring->add_request = pc_render_add_request;
ring->flush = gen4_render_ring_flush;
ring->get_seqno = pc_render_get_seqno;
ring->set_seqno = pc_render_set_seqno;
ring->irq_get = gen5_ring_get_irq;
ring->irq_put = gen5_ring_put_irq;
ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
1610,6 → 1674,7
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
1682,6 → 1747,7
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
1743,6 → 1809,7
ring->flush = gen6_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
1758,6 → 1825,7
ring->flush = bsd_ring_flush;
ring->add_request = i9xx_add_request;
ring->get_seqno = ring_get_seqno;
ring->set_seqno = ring_set_seqno;
if (IS_GEN5(dev)) {
ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
ring->irq_get = gen5_ring_get_irq;
1787,6 → 1855,7
ring->flush = blt_ring_flush;
ring->add_request = gen6_add_request;
ring->get_seqno = gen6_ring_get_seqno;
ring->set_seqno = ring_set_seqno;
ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
ring->irq_get = gen6_ring_get_irq;
ring->irq_put = gen6_ring_put_irq;
/drivers/video/drm/i915/intel_ringbuffer.h
90,6 → 90,8
*/
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
void (*set_seqno)(struct intel_ring_buffer *ring,
u32 seqno);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length,
unsigned flags);
178,6 → 180,13
return ring->status_page.page_addr[reg];
}
 
static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
int reg, u32 value)
{
ring->status_page.page_addr[reg] = value;
}
 
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
208,7 → 217,7
}
void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
/drivers/video/drm/i915/intel_sdvo.c
112,6 → 112,7
* It is only valid when using TMDS encoding and 8 bit per color mode.
*/
uint32_t color_range;
bool color_range_auto;
 
/**
* This is set if we're going to treat the device as TV-out.
134,6 → 135,7
bool is_hdmi;
bool has_hdmi_monitor;
bool has_hdmi_audio;
bool rgb_quant_range_selectable;
 
/**
* This is set if we detect output of sdvo device as LVDS and
955,7 → 957,8
&tx_rate, 1);
}
 
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
964,6 → 967,13
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
 
if (intel_sdvo->rgb_quant_range_selectable) {
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
}
 
intel_dip_infoframe_csum(&avi_if);
 
/* sdvo spec says that the ecc is handled by the hw, and it looks like
1073,6 → 1083,18
multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
 
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_sdvo->has_hdmi_monitor &&
drm_match_cea_mode(adjusted_mode) > 1)
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
else
intel_sdvo->color_range = 0;
}
 
if (intel_sdvo->color_range)
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
 
return true;
}
 
1130,7 → 1152,7
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
intel_sdvo_set_avi_infoframe(intel_sdvo);
intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
 
1162,7 → 1184,7
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (intel_sdvo->is_hdmi)
if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
1522,6 → 1544,8
if (intel_sdvo->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
intel_sdvo->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
1573,6 → 1597,7
 
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
intel_sdvo->rgb_quant_range_selectable = false;
 
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
1884,7 → 1909,6
if (ret)
return ret;
 
#if 0
if (property == dev_priv->force_audio_property) {
int i = val;
bool has_audio;
1907,13 → 1931,23
}
 
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_sdvo->color_range)
return 0;
 
intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_sdvo->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = 0;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
goto done;
}
#endif
 
#define CHECK_PROPERTY(name, NAME) \
if (intel_sdvo_connector->name == property) { \
2008,11 → 2042,8
 
 
done:
if (intel_sdvo->base.base.crtc) {
struct drm_crtc *crtc = intel_sdvo->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
if (intel_sdvo->base.base.crtc)
intel_crtc_restore_mode(intel_sdvo->base.base.crtc);
 
return 0;
#undef CHECK_PROPERTY
2021,7 → 2052,6
static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
.mode_fixup = intel_sdvo_mode_fixup,
.mode_set = intel_sdvo_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2211,14 → 2241,17
}
 
static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
struct drm_device *dev = connector->base.base.dev;
 
intel_attach_force_audio_property(&connector->base.base);
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
}
 
static bool
intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2265,7 → 2298,7
 
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
 
return true;
}
/drivers/video/drm/i915/intel_sprite.c
50,6 → 50,7
u32 sprctl, sprscale = 0;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
 
sprctl = I915_READ(SPRCTL(pipe));
 
89,6 → 90,9
sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
sprctl |= SPRITE_ENABLE;
 
if (IS_HASWELL(dev))
sprctl |= SPRITE_PIPE_CSC_ENABLE;
 
/* Sizes are 0 based */
src_w--;
src_h--;
103,19 → 107,15
* when scaling is disabled.
*/
if (crtc_w != src_w || crtc_h != src_h) {
if (!dev_priv->sprite_scaling_enabled) {
dev_priv->sprite_scaling_enabled = true;
dev_priv->sprite_scaling_enabled |= 1 << pipe;
 
if (!scaling_was_enabled) {
intel_update_watermarks(dev);
intel_wait_for_vblank(dev, pipe);
}
sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
} else {
if (dev_priv->sprite_scaling_enabled) {
dev_priv->sprite_scaling_enabled = false;
/* potentially re-enable LP watermarks */
intel_update_watermarks(dev);
}
}
} else
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
 
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
122,7 → 122,7
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
141,6 → 141,10
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
 
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
intel_update_watermarks(dev);
}
 
static void
150,6 → 154,7
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
bool scaling_was_enabled = dev_priv->sprite_scaling_enabled;
 
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
159,7 → 164,10
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
POSTING_READ(SPRSURF(pipe));
 
dev_priv->sprite_scaling_enabled = false;
dev_priv->sprite_scaling_enabled &= ~(1 << pipe);
 
/* potentially re-enable LP watermarks */
if (scaling_was_enabled && !dev_priv->sprite_scaling_enabled)
intel_update_watermarks(dev);
}
 
287,7 → 295,7
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
intel_gen4_compute_page_offset(&x, &y, obj->tiling_mode,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
 
591,7 → 599,7
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
return -EINVAL;
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
604,7 → 612,7
ret = intel_plane->update_colorkey(plane, set);
 
out_unlock:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
618,7 → 626,7
int ret = 0;
 
 
mutex_lock(&dev->mode_config.mutex);
drm_modeset_lock_all(dev);
 
obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
if (!obj) {
631,7 → 639,7
intel_plane->get_colorkey(plane, get);
 
out_unlock:
mutex_unlock(&dev->mode_config.mutex);
drm_modeset_unlock_all(dev);
return ret;
}
 
/drivers/video/drm/i915/kms_display.c
480,7 → 480,7
/* You don't need to worry about fragmentation issues.
* GTT space is contiguous. I guarantee it. */
 
mapped = bits = (u32*)MapIoMem(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
mapped = bits = (u32*)MapIoMem(dev_priv->gtt.mappable_base + obj->gtt_offset,
CURSOR_WIDTH*CURSOR_HEIGHT*4, PG_SW);
 
if (unlikely(bits == NULL))
681,6 → 681,12
u32 slot;
int ret;
 
if(mask->handle == -2)
{
printf("%s handle %d\n", __FUNCTION__, mask->handle);
return 0;
}
 
obj = drm_gem_object_lookup(dev, file, mask->handle);
if (obj == NULL)
return -ENOENT;
883,6 → 889,12
return 1;
};
 
bool queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
return __queue_work(wq, work);
}
 
 
void __stdcall delayed_work_timer_fn(unsigned long __data)
{
struct delayed_work *dwork = (struct delayed_work *)__data;
962,4 → 974,61
}
 
 
void
prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
unsigned long flags;
 
// wait->flags &= ~WQ_FLAG_EXCLUSIVE;
spin_lock_irqsave(&q->lock, flags);
if (list_empty(&wait->task_list))
__add_wait_queue(q, wait);
spin_unlock_irqrestore(&q->lock, flags);
}
 
/**
* finish_wait - clean up after waiting in a queue
* @q: waitqueue waited on
* @wait: wait descriptor
*
* Sets current thread back to running state and removes
* the wait descriptor from the given waitqueue if still
* queued.
*/
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
unsigned long flags;
 
// __set_current_state(TASK_RUNNING);
/*
* We can check for list emptiness outside the lock
* IFF:
* - we use the "careful" check that verifies both
* the next and prev pointers, so that there cannot
* be any half-pending updates in progress on other
* CPU's that we haven't seen yet (and that might
* still change the stack area).
* and
* - all other users take the lock (ie we can only
* have _one_ other CPU that looks at or modifies
* the list).
*/
if (!list_empty_careful(&wait->task_list)) {
spin_lock_irqsave(&q->lock, flags);
list_del_init(&wait->task_list);
spin_unlock_irqrestore(&q->lock, flags);
}
 
DestroyEvent(wait->evnt);
}
 
int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
list_del_init(&wait->task_list);
return 1;
}
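 
/* Illustrative sketch, not part of this revision: the upstream wait-queue
 * idiom that prepare_to_wait()/finish_wait() above mirror. Whether
 * DEFINE_WAIT(), TASK_UNINTERRUPTIBLE and schedule() are available in this
 * port is an assumption; the condition being polled is a placeholder.
 */
static void example_wait_for_flag(wait_queue_head_t *q, volatile int *done)
{
	DEFINE_WAIT(wait);			/* assumed helper */

	for (;;) {
		prepare_to_wait(q, &wait, TASK_UNINTERRUPTIBLE);
		if (*done)			/* placeholder condition */
			break;
		schedule();			/* assumed scheduling primitive */
	}
	finish_wait(q, &wait);
}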
 
 
 
 
 
/drivers/video/drm/i915/main.c
57,7 → 57,7
 
int i915_modeset = 1;
 
u32_t drvEntry(int action, char *cmdline)
u32_t __attribute__((externally_visible)) drvEntry(int action, char *cmdline)
{
 
int err = 0;
82,10 → 82,10
return 0;
};
}
dbgprintf("i915 RC 10.5\n cmdline: %s\n", cmdline);
dbgprintf(" i915 v3.9-rc8\n cmdline: %s\n", cmdline);
 
cpu_detect();
dbgprintf("\ncache line size %d\n", x86_clflush_size);
// dbgprintf("\ncache line size %d\n", x86_clflush_size);
 
enum_pci_devices();
 
105,6 → 105,14
return err;
};
 
//int __declspec(dllexport) DllMain(int, char*) __attribute__ ((weak, alias ("drvEntry")));
 
//int __declspec(dllexport) DllMain( int hinstDLL, int fdwReason, void *lpReserved )
//{
//
// return 1;
//}
 
#define CURRENT_API 0x0200 /* 2.00 */
#define COMPATIBLE_API 0x0100 /* 1.00 */
 
138,7 → 146,7
#define SRV_I915_GEM_BUSY 28
#define SRV_I915_GEM_SET_DOMAIN 29
#define SRV_I915_GEM_MMAP 30
 
#define SRV_I915_GEM_MMAP_GTT 31
#define SRV_I915_GEM_THROTTLE 32
#define SRV_FBINFO 33
#define SRV_I915_GEM_EXECBUFFER2 34
267,6 → 275,11
retval = i915_gem_mmap_ioctl(main_device, inp, file);
break;
 
case SRV_I915_GEM_MMAP_GTT:
retval = i915_gem_mmap_gtt_ioctl(main_device, inp, file);
break;
 
 
case SRV_FBINFO:
retval = i915_fbinfo(inp);
break;
/drivers/video/drm/i915/utils.c
4,6 → 4,7
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include <linux/hdmi.h>
 
 
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
102,3 → 103,410
if(filep->pages)
kfree(filep->pages);
}
 
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
*
* Returns 0 on success or a negative error code on failure.
*/
int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
 
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = 13;
 
return 0;
}
 
 
static void *check_bytes8(const u8 *start, u8 value, unsigned int bytes)
{
while (bytes) {
if (*start != value)
return (void *)start;
start++;
bytes--;
}
return NULL;
}
 
/**
* memchr_inv - Find an unmatching character in an area of memory.
* @start: The memory area
* @c: Find a character other than c
* @bytes: The size of the area.
*
* returns the address of the first character other than @c, or %NULL
* if the whole buffer contains just @c.
*/
void *memchr_inv(const void *start, int c, size_t bytes)
{
u8 value = c;
u64 value64;
unsigned int words, prefix;
 
if (bytes <= 16)
return check_bytes8(start, value, bytes);
 
value64 = value;
#if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64
value64 *= 0x0101010101010101;
#elif defined(ARCH_HAS_FAST_MULTIPLIER)
value64 *= 0x01010101;
value64 |= value64 << 32;
#else
value64 |= value64 << 8;
value64 |= value64 << 16;
value64 |= value64 << 32;
#endif
 
prefix = (unsigned long)start % 8;
if (prefix) {
u8 *r;
 
prefix = 8 - prefix;
r = check_bytes8(start, value, prefix);
if (r)
return r;
start += prefix;
bytes -= prefix;
}
 
words = bytes / 8;
 
while (words) {
if (*(u64 *)start != value64)
return check_bytes8(start, value, 8);
start += 8;
words--;
}
 
return check_bytes8(start, value, bytes % 8);
}
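 
/* Illustrative usage, not part of this revision: memchr_inv() as a fast
 * "is this buffer all zeroes?" test; a non-NULL return points at the first
 * byte that differs from the expected value.
 */
static bool example_buffer_is_zeroed(const void *buf, size_t len)
{
	return memchr_inv(buf, 0, len) == NULL;
}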
 
 
 
int dma_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, int dir)
{
struct scatterlist *s;
int i;
 
for_each_sg(sglist, s, nelems, i) {
s->dma_address = (dma_addr_t)sg_phys(s);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
s->dma_length = s->length;
#endif
}
 
return nelems;
}
 
 
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
int i;
 
i = vsnprintf(buf, size, fmt, args);
 
if (likely(i < size))
return i;
if (size != 0)
return size - 1;
return 0;
}
 
 
int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
va_list args;
int i;
 
va_start(args, fmt);
i = vscnprintf(buf, size, fmt, args);
va_end(args);
 
return i;
}
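 
/* Illustrative usage, not part of this revision: unlike snprintf(),
 * scnprintf() returns the number of characters actually written, so an
 * appending offset can never run past the end of the buffer.
 */
static void example_build_line(char *buf, size_t size, int pipe, u32 reg)
{
	int len = 0;

	len += scnprintf(buf + len, size - len, "pipe %c", 'A' + pipe);
	len += scnprintf(buf + len, size - len, " reg 0x%08x", reg);
}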
 
 
 
#define _U 0x01 /* upper */
#define _L 0x02 /* lower */
#define _D 0x04 /* digit */
#define _C 0x08 /* cntrl */
#define _P 0x10 /* punct */
#define _S 0x20 /* white space (space/lf/tab) */
#define _X 0x40 /* hex digit */
#define _SP 0x80 /* hard space (0x20) */
 
extern const unsigned char _ctype[];
 
#define __ismask(x) (_ctype[(int)(unsigned char)(x)])
 
#define isalnum(c) ((__ismask(c)&(_U|_L|_D)) != 0)
#define isalpha(c) ((__ismask(c)&(_U|_L)) != 0)
#define iscntrl(c) ((__ismask(c)&(_C)) != 0)
#define isdigit(c) ((__ismask(c)&(_D)) != 0)
#define isgraph(c) ((__ismask(c)&(_P|_U|_L|_D)) != 0)
#define islower(c) ((__ismask(c)&(_L)) != 0)
#define isprint(c) ((__ismask(c)&(_P|_U|_L|_D|_SP)) != 0)
#define ispunct(c) ((__ismask(c)&(_P)) != 0)
/* Note: isspace() must return false for %NUL-terminator */
#define isspace(c) ((__ismask(c)&(_S)) != 0)
#define isupper(c) ((__ismask(c)&(_U)) != 0)
#define isxdigit(c) ((__ismask(c)&(_D|_X)) != 0)
 
#define isascii(c) (((unsigned char)(c))<=0x7f)
#define toascii(c) (((unsigned char)(c))&0x7f)
 
static inline unsigned char __tolower(unsigned char c)
{
if (isupper(c))
c -= 'A'-'a';
return c;
}
 
static inline unsigned char __toupper(unsigned char c)
{
if (islower(c))
c -= 'a'-'A';
return c;
}
 
#define tolower(c) __tolower(c)
#define toupper(c) __toupper(c)
 
/*
* Fast implementation of tolower() for internal usage. Do not use in your
* code.
*/
static inline char _tolower(const char c)
{
return c | 0x20;
}
 
 
 
//const char hex_asc[] = "0123456789abcdef";
 
/**
* hex_to_bin - convert a hex digit to its real value
* @ch: ascii character represents hex digit
*
* hex_to_bin() converts one hex digit to its actual value or -1 in case of bad
* input.
*/
int hex_to_bin(char ch)
{
if ((ch >= '0') && (ch <= '9'))
return ch - '0';
ch = tolower(ch);
if ((ch >= 'a') && (ch <= 'f'))
return ch - 'a' + 10;
return -1;
}
EXPORT_SYMBOL(hex_to_bin);
 
/**
* hex2bin - convert an ascii hexadecimal string to its binary representation
* @dst: binary result
* @src: ascii hexadecimal string
* @count: result length
*
* Return 0 on success, -1 in case of bad input.
*/
int hex2bin(u8 *dst, const char *src, size_t count)
{
while (count--) {
int hi = hex_to_bin(*src++);
int lo = hex_to_bin(*src++);
 
if ((hi < 0) || (lo < 0))
return -1;
 
*dst++ = (hi << 4) | lo;
}
return 0;
}
EXPORT_SYMBOL(hex2bin);
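 
/* Illustrative usage, not part of this revision: hex2bin() consumes exactly
 * 2 * count hex digits; here the literal "deadbeef" is unpacked into four
 * bytes and a non-zero return signals malformed input.
 */
static int example_parse_hex(void)
{
	u8 bytes[4];

	if (hex2bin(bytes, "deadbeef", sizeof(bytes)))
		return -1;

	/* bytes[] now holds 0xde 0xad 0xbe 0xef */
	return 0;
}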
 
/**
* hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @linebuf: where to put the converted data
* @linebuflen: total size of @linebuf, including space for terminating NUL
* @ascii: include ASCII after the hex output
*
* hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
*
* Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
* to a hex + ASCII dump at the supplied memory location.
* The converted output is always NUL-terminated.
*
* E.g.:
* hex_dump_to_buffer(frame->data, frame->len, 16, 1,
* linebuf, sizeof(linebuf), true);
*
* example output buffer:
* 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
*/
void hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
int groupsize, char *linebuf, size_t linebuflen,
bool ascii)
{
const u8 *ptr = buf;
u8 ch;
int j, lx = 0;
int ascii_column;
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
if (!len)
goto nil;
if (len > rowsize) /* limit to one line at a time */
len = rowsize;
if ((len % groupsize) != 0) /* no mixed size output */
groupsize = 1;
 
switch (groupsize) {
case 8: {
const u64 *ptr8 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%16.16llx", j ? " " : "",
(unsigned long long)*(ptr8 + j));
ascii_column = 17 * ngroups + 2;
break;
}
 
case 4: {
const u32 *ptr4 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%8.8x", j ? " " : "", *(ptr4 + j));
ascii_column = 9 * ngroups + 2;
break;
}
 
case 2: {
const u16 *ptr2 = buf;
int ngroups = len / groupsize;
 
for (j = 0; j < ngroups; j++)
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s%4.4x", j ? " " : "", *(ptr2 + j));
ascii_column = 5 * ngroups + 2;
break;
}
 
default:
for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = hex_asc_hi(ch);
linebuf[lx++] = hex_asc_lo(ch);
linebuf[lx++] = ' ';
}
if (j)
lx--;
 
ascii_column = 3 * rowsize + 2;
break;
}
if (!ascii)
goto nil;
 
while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
linebuf[lx++] = ' ';
for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) {
ch = ptr[j];
linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
}
nil:
linebuf[lx++] = '\0';
}
 
/**
* print_hex_dump - print a text hex dump to syslog for a binary blob of data
* @level: kernel log level (e.g. KERN_DEBUG)
* @prefix_str: string to prefix each line with;
* caller supplies trailing spaces for alignment if desired
* @prefix_type: controls whether prefix of an offset, address, or none
* is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
* @rowsize: number of bytes to print per line; must be 16 or 32
* @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
* @buf: data blob to dump
* @len: number of bytes in the @buf
* @ascii: include ASCII after the hex output
*
* Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
* to the kernel log at the specified kernel log level, with an optional
* leading prefix.
*
* print_hex_dump() works on one "line" of output at a time, i.e.,
* 16 or 32 bytes of input data converted to hex + ASCII output.
* print_hex_dump() iterates over the entire input @buf, breaking it into
* "line size" chunks to format and print.
*
* E.g.:
* print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
* 16, 1, frame->data, frame->len, true);
*
* Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
* 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
* Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
* ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
*/
void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
int rowsize, int groupsize,
const void *buf, size_t len, bool ascii)
{
const u8 *ptr = buf;
int i, linelen, remaining = len;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
 
if (rowsize != 16 && rowsize != 32)
rowsize = 16;
 
for (i = 0; i < len; i += rowsize) {
linelen = min(remaining, rowsize);
remaining -= rowsize;
 
hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
linebuf, sizeof(linebuf), ascii);
 
switch (prefix_type) {
case DUMP_PREFIX_ADDRESS:
printk("%s%s%p: %s\n",
level, prefix_str, ptr + i, linebuf);
break;
case DUMP_PREFIX_OFFSET:
printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
break;
default:
printk("%s%s%s\n", level, prefix_str, linebuf);
break;
}
}
}
 
void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
const void *buf, size_t len)
{
print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, 16, 1,
buf, len, true);
}
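 
/* Illustrative usage, not part of this revision: dump an arbitrary blob at
 * KERN_DEBUG level via the wrapper above; the prefix string is a placeholder.
 */
static void example_dump_blob(const void *blob, size_t len)
{
	print_hex_dump_bytes("i915 blob: ", DUMP_PREFIX_OFFSET, blob, len);
}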