Subversion Repositories Kolibri OS

Compare Revisions

Rev 3242 → Rev 3243

/drivers/include/drm/drmP.h
60,6 → 60,7
//#include <linux/file.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/irqreturn.h>
//#include <linux/smp_lock.h> /* For (un)lock_kernel */
//#include <linux/dma-mapping.h>
//#include <linux/mm.h>
170,7 → 171,7
/** \name Begin the DRM... */
/*@{*/
 
#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then
#define DRM_DEBUG_CODE 0 /**< Include debugging code if > 1, then
also include looping detection. */
 
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */
965,6 → 966,15
 
#endif
 
#define DRM_IRQ_ARGS int irq, void *arg
 
struct drm_driver {
irqreturn_t (*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
};
 
 
#define DRM_MINOR_UNASSIGNED 0
#define DRM_MINOR_LEGACY 1
#define DRM_MINOR_CONTROL 2
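For reference, a minimal sketch of how a driver might populate the irq hooks added above; the my_* names are illustrative, not part of this header:

static irqreturn_t my_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = arg;

        /* acknowledge the hardware, then hand off to the driver */
        (void)dev;
        return IRQ_HANDLED;
}

static void my_irq_preinstall(struct drm_device *dev)
{
        /* mask device interrupts before the handler is installed */
}

static int my_irq_postinstall(struct drm_device *dev)
{
        return 0; /* unmask and enable interrupt sources */
}

static struct drm_driver my_driver = {
        .irq_handler     = my_irq_handler,
        .irq_preinstall  = my_irq_preinstall,
        .irq_postinstall = my_irq_postinstall,
};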
1172,7 → 1182,7
// struct drm_sigdata sigdata; /**< For block_all_signals */
// sigset_t sigmask;
 
// struct drm_driver *driver;
struct drm_driver *driver;
// struct drm_local_map *agp_buffer_map;
// unsigned int agp_buffer_token;
// struct drm_minor *control; /**< Control node for card */
/drivers/include/drm/drm_global.h
0,0 → 1,53
/**************************************************************************
*
* Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
 
#ifndef _DRM_GLOBAL_H_
#define _DRM_GLOBAL_H_
enum drm_global_types {
DRM_GLOBAL_TTM_MEM = 0,
DRM_GLOBAL_TTM_BO,
DRM_GLOBAL_TTM_OBJECT,
DRM_GLOBAL_NUM
};
 
struct drm_global_reference {
enum drm_global_types global_type;
size_t size;
void *object;
int (*init) (struct drm_global_reference *);
void (*release) (struct drm_global_reference *);
};
 
extern void drm_global_init(void);
extern void drm_global_release(void);
extern int drm_global_item_ref(struct drm_global_reference *ref);
extern void drm_global_item_unref(struct drm_global_reference *ref);
 
#endif
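A hedged sketch of the intended use: the first drm_global_item_ref() call allocates ref->object (ref->size bytes) and runs ->init(); later callers share that object until the last drm_global_item_unref(). struct my_global and the callbacks are illustrative:

struct my_global {
        int usage;
};

static int my_global_init(struct drm_global_reference *ref)
{
        struct my_global *g = ref->object; /* allocated by the core */

        g->usage = 0;
        return 0;
}

static void my_global_release(struct drm_global_reference *ref)
{
        /* last reference dropped; the core frees ref->object */
}

static struct drm_global_reference my_ref = {
        .global_type = DRM_GLOBAL_TTM_MEM,
        .size        = sizeof(struct my_global),
        .init        = my_global_init,
        .release     = my_global_release,
};
/* usage: drm_global_item_ref(&my_ref); ... drm_global_item_unref(&my_ref); */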
/drivers/include/drm/intel-gtt.h
19,6 → 19,7
unsigned int do_idle_maps : 1;
/* Share the scratch page dma with ppgtts. */
dma_addr_t scratch_page_dma;
struct page *scratch_page;
/* for ppgtt PDE access */
u32 __iomem *gtt;
/* needed for ioremap in drm/i915 */
32,7 → 33,8
bool intel_enable_gtt(void);
 
void intel_gtt_chipset_flush(void);
void intel_gtt_insert_sg_entries(struct pagelist *st, unsigned int pg_start,
void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags);
void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
 
/drivers/include/linux/asm/scatterlist.h
0,0 → 1,34
#ifndef __ASM_GENERIC_SCATTERLIST_H
#define __ASM_GENERIC_SCATTERLIST_H
 
#include <linux/types.h>
 
struct scatterlist {
#ifdef CONFIG_DEBUG_SG
unsigned long sg_magic;
#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
unsigned int dma_length;
#endif
};
 
/*
* These macros should be used after a dma_map_sg call has been done
* to get bus addresses of each of the SG entries and their lengths.
* You should only work with the number of sg entries pci_map_sg
* returns, or alternatively stop on the first sg_dma_len(sg) which
* is 0.
*/
#define sg_dma_address(sg) ((sg)->dma_address)
 
#ifdef CONFIG_NEED_SG_DMA_LENGTH
#define sg_dma_len(sg) ((sg)->dma_length)
#else
#define sg_dma_len(sg) ((sg)->length)
#endif
 
#endif /* __ASM_GENERIC_SCATTERLIST_H */
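A sketch of the access pattern the comment above prescribes, assuming a table already mapped with dma_map_sg()/pci_map_sg(); for_each_sg() comes from <linux/scatterlist.h>, and program_desc() is an illustrative stand-in for device-specific descriptor setup:

static void program_all(struct scatterlist *sgl, int mapped)
{
        struct scatterlist *sg;
        int i;

        /* 'mapped' is the entry count dma_map_sg() returned */
        for_each_sg(sgl, sg, mapped, i) {
                dma_addr_t bus = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                if (len == 0)           /* the alternative stop condition */
                        break;
                program_desc(bus, len); /* illustrative */
        }
}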
/drivers/include/linux/compiler-gcc4.h
63,3 → 63,13
#define __compiletime_warning(message) __attribute__((warning(message)))
#define __compiletime_error(message) __attribute__((error(message)))
#endif
 
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if __GNUC_MINOR__ >= 4
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
#endif
#if __GNUC_MINOR__ >= 8 || (defined(__powerpc__) && __GNUC_MINOR__ >= 6)
#define __HAVE_BUILTIN_BSWAP16__
#endif
#endif
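A sketch of how byte-swapping helpers can key off these guards; my_swab32() is illustrative, and u32 is assumed from <asm/types.h>:

static inline u32 my_swab32(u32 x)
{
#ifdef __HAVE_BUILTIN_BSWAP32__
        return __builtin_bswap32(x);    /* compiles to a single bswap */
#else
        return (x << 24) | ((x & 0x0000ff00) << 8) |
               ((x >> 8) & 0x0000ff00) | (x >> 24);
#endif
}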
/drivers/include/linux/compiler.h
10,6 → 10,7
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
33,6 → 34,7
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
42,6 → 44,10
# define __rcu
#endif
 
/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
 
#ifdef __KERNEL__
 
#ifdef __GNUC__
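The extra level of indirection in __PASTE matters because operands of ## are not macro-expanded; a short sketch, with UNIQUE_ID() illustrative:

/* Without the indirection this would paste the literal token
 * "__LINE__" instead of the current line number. */
#define UNIQUE_ID(prefix) __PASTE(prefix, __LINE__)

static int UNIQUE_ID(tmp_);     /* expands to e.g. tmp_207 */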
/drivers/include/linux/err.h
0,0 → 1,65
#ifndef _LINUX_ERR_H
#define _LINUX_ERR_H
 
#include <linux/compiler.h>
 
#include <errno.h>
 
/*
* Kernel pointers have redundant information, so we can use a
* scheme where we can return either an error code or a dentry
* pointer with the same return value.
*
* This should be a per-architecture thing, to allow different
* error and pointer decisions.
*/
#define MAX_ERRNO 4095
 
#ifndef __ASSEMBLY__
 
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
static inline void * __must_check ERR_PTR(long error)
{
return (void *) error;
}
 
static inline long __must_check PTR_ERR(const void *ptr)
{
return (long) ptr;
}
 
static inline long __must_check IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
 
static inline long __must_check IS_ERR_OR_NULL(const void *ptr)
{
return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}
 
/**
* ERR_CAST - Explicitly cast an error-valued pointer to another pointer type
* @ptr: The pointer to cast.
*
* Explicitly cast an error-valued pointer to another pointer type in such a
* way as to make it clear that's what's going on.
*/
static inline void * __must_check ERR_CAST(const void *ptr)
{
/* cast away the const */
return (void *) ptr;
}
 
static inline int __must_check PTR_RET(const void *ptr)
{
if (IS_ERR(ptr))
return PTR_ERR(ptr);
else
return 0;
}
 
#endif
 
#endif /* _LINUX_ERR_H */
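A short sketch of the pointer-or-errno convention described above; struct foo and foo_table are illustrative:

struct foo { int id; };
static struct foo foo_table[16];

static struct foo *foo_lookup(int id)
{
        if (id < 0 || id >= 16)
                return ERR_PTR(-EINVAL); /* errno travels in the pointer */
        return &foo_table[id];
}

static int foo_use(int id)
{
        struct foo *f = foo_lookup(id);

        if (IS_ERR(f))
                return PTR_ERR(f);       /* decode it back to an errno */
        return f->id;
}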
/drivers/include/linux/i2c.h
172,6 → 172,7
* @platform_data: stored in i2c_client.dev.platform_data
* @archdata: copied into i2c_client.dev.archdata
* @of_node: pointer to OpenFirmware device node
* @acpi_node: ACPI device node
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
/drivers/include/linux/kernel.h
331,47 → 331,15
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
 
 
struct scatterlist {
unsigned long page_link;
unsigned int offset;
unsigned int length;
dma_addr_t dma_address;
unsigned int dma_length;
};
 
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
unsigned int orig_nents; /* original size of list */
};
 
#define SG_MAX_SINGLE_ALLOC (4096 / sizeof(struct scatterlist))
 
struct scatterlist *sg_next(struct scatterlist *sg);
 
#define sg_dma_address(sg) ((sg)->dma_address)
#define sg_dma_len(sg) ((sg)->length)
 
#define sg_is_chain(sg) ((sg)->page_link & 0x01)
#define sg_is_last(sg) ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg) \
((struct scatterlist *) ((sg)->page_link & ~0x03))
 
static inline addr_t sg_page(struct scatterlist *sg)
{
return (addr_t)((sg)->page_link & ~0x3);
}
 
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
 
 
 
struct page
{
unsigned int addr;
};
 
#define page_to_phys(page) ((dma_addr_t)(page))
 
struct vm_fault {
unsigned int flags; /* FAULT_FLAG_xxx flags */
390,5 → 358,9
unsigned int nents;
};
 
#define page_cache_release(page) FreePage((addr_t)(page))
 
#define alloc_page(gfp_mask) (struct page*)AllocPage()
 
#endif
 
/drivers/include/linux/lockdep.h
498,14 → 498,17
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 2, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 2, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 2, NULL, i)
# else
# define rwsem_acquire(l, s, t, i) lock_acquire(l, s, t, 0, 1, NULL, i)
# define rwsem_acquire_nest(l, s, t, n, i) lock_acquire(l, s, t, 0, 1, n, i)
# define rwsem_acquire_read(l, s, t, i) lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i) lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i) do { } while (0)
# define rwsem_acquire_nest(l, s, t, n, i) do { } while (0)
# define rwsem_acquire_read(l, s, t, i) do { } while (0)
# define rwsem_release(l, n, i) do { } while (0)
#endif
/drivers/include/linux/mm.h
0,0 → 1,0
 
/drivers/include/linux/module.h
9,6 → 9,7
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/export.h>
 
 
 
/drivers/include/linux/rbtree.h
0,0 → 1,84
/*
Red Black Trees
(C) 1999 Andrea Arcangeli <andrea@suse.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
 
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
 
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
linux/include/linux/rbtree.h
 
To use rbtrees you'll have to implement your own insert and search cores
(a minimal insert core is sketched after this header). This avoids
callbacks and the dramatic performance cost they would add. It's not the
cleanest way, but it's how C (as opposed to C++) gets both performance
and genericity...
 
See Documentation/rbtree.txt for documentation and samples.
*/
 
#ifndef _LINUX_RBTREE_H
#define _LINUX_RBTREE_H
 
#include <linux/kernel.h>
#include <linux/stddef.h>
 
struct rb_node {
unsigned long __rb_parent_color;
struct rb_node *rb_right;
struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
/* The alignment might seem pointless, but allegedly CRIS needs it */
 
struct rb_root {
struct rb_node *rb_node;
};
 
 
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
 
#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
 
#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
 
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node) \
((node)->__rb_parent_color = (unsigned long)(node))
 
 
extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);
 
 
/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
 
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
struct rb_root *root);
 
static inline void rb_link_node(struct rb_node * node, struct rb_node * parent,
struct rb_node ** rb_link)
{
node->__rb_parent_color = (unsigned long)parent;
node->rb_left = node->rb_right = NULL;
 
*rb_link = node;
}
 
#endif /* _LINUX_RBTREE_H */
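The insert core mentioned at the top of this header, in the style of Documentation/rbtree.txt; struct mytype and its integer key are illustrative:

struct mytype {
        struct rb_node node;
        int key;
};

static int my_insert(struct rb_root *root, struct mytype *data)
{
        struct rb_node **new = &root->rb_node, *parent = NULL;

        /* Figure out where to put the new node */
        while (*new) {
                struct mytype *this = rb_entry(*new, struct mytype, node);

                parent = *new;
                if (data->key < this->key)
                        new = &(*new)->rb_left;
                else if (data->key > this->key)
                        new = &(*new)->rb_right;
                else
                        return -1;      /* key already present */
        }

        /* Add new node and rebalance the tree */
        rb_link_node(&data->node, parent, new);
        rb_insert_color(&data->node, root);
        return 0;
}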
/drivers/include/linux/rwlock.h
0,0 → 1,125
#ifndef __LINUX_RWLOCK_H
#define __LINUX_RWLOCK_H
 
#ifndef __LINUX_SPINLOCK_H
# error "please don't include this file directly"
#endif
 
/*
* rwlock related methods
*
* split out from spinlock.h
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
*/
 
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __rwlock_init(rwlock_t *lock, const char *name,
struct lock_class_key *key);
# define rwlock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__rwlock_init((lock), #lock, &__key); \
} while (0)
#else
# define rwlock_init(lock) \
do { *(lock) = __RW_LOCK_UNLOCKED(lock); } while (0)
#endif
 
#ifdef CONFIG_DEBUG_SPINLOCK
extern void do_raw_read_lock(rwlock_t *lock) __acquires(lock);
#define do_raw_read_lock_flags(lock, flags) do_raw_read_lock(lock)
extern int do_raw_read_trylock(rwlock_t *lock);
extern void do_raw_read_unlock(rwlock_t *lock) __releases(lock);
extern void do_raw_write_lock(rwlock_t *lock) __acquires(lock);
#define do_raw_write_lock_flags(lock, flags) do_raw_write_lock(lock)
extern int do_raw_write_trylock(rwlock_t *lock);
extern void do_raw_write_unlock(rwlock_t *lock) __releases(lock);
#else
# define do_raw_read_lock(rwlock) do {__acquire(lock); arch_read_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_read_lock_flags(lock, flags) \
do {__acquire(lock); arch_read_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_read_trylock(rwlock) arch_read_trylock(&(rwlock)->raw_lock)
# define do_raw_read_unlock(rwlock) do {arch_read_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
# define do_raw_write_lock(rwlock) do {__acquire(lock); arch_write_lock(&(rwlock)->raw_lock); } while (0)
# define do_raw_write_lock_flags(lock, flags) \
do {__acquire(lock); arch_write_lock_flags(&(lock)->raw_lock, *(flags)); } while (0)
# define do_raw_write_trylock(rwlock) arch_write_trylock(&(rwlock)->raw_lock)
# define do_raw_write_unlock(rwlock) do {arch_write_unlock(&(rwlock)->raw_lock); __release(lock); } while (0)
#endif
 
#define read_can_lock(rwlock) arch_read_can_lock(&(rwlock)->raw_lock)
#define write_can_lock(rwlock) arch_write_can_lock(&(rwlock)->raw_lock)
 
/*
* Define the various rw_lock methods. Note we define these
* regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
* methods are defined as nops in the case they are not required.
*/
#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
 
#define write_lock(lock) _raw_write_lock(lock)
#define read_lock(lock) _raw_read_lock(lock)
 
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_read_lock_irqsave(lock); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_write_lock_irqsave(lock); \
} while (0)
 
#else
 
#define read_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_read_lock_irqsave(lock, flags); \
} while (0)
#define write_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_write_lock_irqsave(lock, flags); \
} while (0)
 
#endif
 
#define read_lock_irq(lock) _raw_read_lock_irq(lock)
#define read_lock_bh(lock) _raw_read_lock_bh(lock)
#define write_lock_irq(lock) _raw_write_lock_irq(lock)
#define write_lock_bh(lock) _raw_write_lock_bh(lock)
#define read_unlock(lock) _raw_read_unlock(lock)
#define write_unlock(lock) _raw_write_unlock(lock)
#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
 
#define read_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_read_unlock_irqrestore(lock, flags); \
} while (0)
#define read_unlock_bh(lock) _raw_read_unlock_bh(lock)
 
#define write_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_write_unlock_irqrestore(lock, flags); \
} while (0)
#define write_unlock_bh(lock) _raw_write_unlock_bh(lock)
 
#define write_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
write_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
 
#endif /* __LINUX_RWLOCK_H */
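A sketch of the basic pattern these macros enable: many concurrent readers, one exclusive writer. my_lock and shared_count are illustrative; the lock is assumed to have been set up with rwlock_init():

static rwlock_t my_lock;
static int shared_count;

static int read_count(void)
{
        int n;

        read_lock(&my_lock);    /* readers may run concurrently */
        n = shared_count;
        read_unlock(&my_lock);
        return n;
}

static void bump_count(void)
{
        write_lock(&my_lock);   /* excludes readers and other writers */
        shared_count++;
        write_unlock(&my_lock);
}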
/drivers/include/linux/scatterlist.h
0,0 → 1,274
#ifndef _LINUX_SCATTERLIST_H
#define _LINUX_SCATTERLIST_H
 
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/mm.h>
 
#include <asm/types.h>
#include <asm/scatterlist.h>
//#include <asm/io.h>
 
struct sg_table {
struct scatterlist *sgl; /* the list */
unsigned int nents; /* number of mapped entries */
unsigned int orig_nents; /* original size of list */
};
 
/*
* Notes on SG table design.
*
* Architectures must provide an unsigned long page_link field in the
* scatterlist struct. We use that to place the page pointer AND encode
* information about the sg table as well. The two lower bits are reserved
* for this information.
*
* If bit 0 is set, then the page_link contains a pointer to the next sg
* table list. Otherwise the next entry is at sg + 1.
*
* If bit 1 is set, then this sg entry is the last element in a list.
*
* See sg_next().
*
*/
 
#define SG_MAGIC 0x87654321
 
/*
* We overload the LSB of the page pointer to indicate whether it's
* a valid sg entry, or whether it points to the start of a new scatterlist.
* Those low bits are there for everyone! (thanks mason :-)
*/
#define sg_is_chain(sg) ((sg)->page_link & 0x01)
#define sg_is_last(sg) ((sg)->page_link & 0x02)
#define sg_chain_ptr(sg) \
((struct scatterlist *) ((sg)->page_link & ~0x03))
 
/**
* sg_assign_page - Assign a given page to an SG entry
* @sg: SG entry
* @page: The page
*
* Description:
* Assign page to sg entry. Also see sg_set_page(), the most commonly used
* variant.
*
**/
static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
{
unsigned long page_link = sg->page_link & 0x3;
 
/*
* In order for the low bit stealing approach to work, pages
* must be aligned at a 32-bit boundary as a minimum.
*/
BUG_ON((unsigned long) page & 0x03);
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
sg->page_link = page_link | (unsigned long) page;
}
 
/**
* sg_set_page - Set sg entry to point at given page
* @sg: SG entry
* @page: The page
* @len: Length of data
* @offset: Offset into page
*
* Description:
* Use this function to set an sg entry pointing at a page, never assign
* the page directly. We encode sg table information in the lower bits
* of the page pointer. See sg_page() for looking up the page belonging
* to an sg entry.
*
**/
static inline void sg_set_page(struct scatterlist *sg, struct page *page,
unsigned int len, unsigned int offset)
{
sg_assign_page(sg, page);
sg->offset = offset;
sg->length = len;
}
 
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
return (struct page *)((sg)->page_link & ~0x3);
}
 
/**
* sg_set_buf - Set sg entry to point at given data
* @sg: SG entry
* @buf: Data
* @buflen: Data length
*
**/
//static inline void sg_set_buf(struct scatterlist *sg, const void *buf,
// unsigned int buflen)
//{
// sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
//}
 
/*
* Loop over each sg element, following the pointer to a new list if necessary
*/
#define for_each_sg(sglist, sg, nr, __i) \
for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))
 
/**
* sg_chain - Chain two sglists together
* @prv: First scatterlist
* @prv_nents: Number of entries in prv
* @sgl: Second scatterlist
*
* Description:
* Links @prv@ and @sgl@ together, to form a longer scatterlist.
*
**/
static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
struct scatterlist *sgl)
{
#ifndef ARCH_HAS_SG_CHAIN
BUG();
#endif
 
/*
* offset and length are unused for chain entry. Clear them.
*/
prv[prv_nents - 1].offset = 0;
prv[prv_nents - 1].length = 0;
 
/*
* Set lowest bit to indicate a link pointer, and make sure to clear
* the termination bit if it happens to be set.
*/
prv[prv_nents - 1].page_link = ((unsigned long) sgl | 0x01) & ~0x02;
}
 
/**
* sg_mark_end - Mark the end of the scatterlist
* @sg: SG entry
*
* Description:
* Marks the passed in sg entry as the termination point for the sg
* table. A call to sg_next() on this entry will return NULL.
*
**/
static inline void sg_mark_end(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
/*
* Set termination bit, clear potential chain bit
*/
sg->page_link |= 0x02;
sg->page_link &= ~0x01;
}
 
/**
* sg_phys - Return physical address of an sg entry
* @sg: SG entry
*
* Description:
* This calls page_to_phys() on the page in this sg entry, and adds the
* sg offset. The caller must know that it is legal to call page_to_phys()
* on the sg page.
*
**/
static inline dma_addr_t sg_phys(struct scatterlist *sg)
{
return page_to_phys(sg_page(sg)) + sg->offset;
}
 
/**
* sg_virt - Return virtual address of an sg entry
* @sg: SG entry
*
* Description:
* This calls page_address() on the page in this sg entry, and adds the
* sg offset. The caller must know that the sg page has a valid virtual
* mapping.
*
**/
//static inline void *sg_virt(struct scatterlist *sg)
//{
// return page_address(sg_page(sg)) + sg->offset;
//}
 
int sg_nents(struct scatterlist *sg);
struct scatterlist *sg_next(struct scatterlist *);
struct scatterlist *sg_last(struct scatterlist *s, unsigned int);
void sg_init_table(struct scatterlist *, unsigned int);
void sg_init_one(struct scatterlist *, const void *, unsigned int);
 
typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t);
typedef void (sg_free_fn)(struct scatterlist *, unsigned int);
 
void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *);
void sg_free_table(struct sg_table *);
int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t,
sg_alloc_fn *);
int sg_alloc_table(struct sg_table *, unsigned int, gfp_t);
int sg_alloc_table_from_pages(struct sg_table *sgt,
struct page **pages, unsigned int n_pages,
unsigned long offset, unsigned long size,
gfp_t gfp_mask);
 
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
void *buf, size_t buflen);
 
/*
* Maximum number of entries that will be allocated in one piece; if
* a larger list is required, chaining will be used.
*/
#define SG_MAX_SINGLE_ALLOC (PAGE_SIZE / sizeof(struct scatterlist))
 
 
/*
* Mapping sg iterator
*
* Iterates over sg entries mapping page-by-page. On each successful
* iteration, @miter->page points to the mapped page and
* @miter->length bytes of data can be accessed at @miter->addr. As
* long as an iteration is enclosed between start and stop, the user
* is free to choose control structure and when to stop.
*
* @miter->consumed is set to @miter->length on each iteration. It
* can be adjusted if the user can't consume all the bytes in one go.
* Also, a stopped iteration can be resumed by calling next on it.
* This is useful when iteration needs to release all resources and
* continue later (e.g. at the next interrupt).
*/
 
#define SG_MITER_ATOMIC (1 << 0) /* use kmap_atomic */
#define SG_MITER_TO_SG (1 << 1) /* flush back to phys on unmap */
#define SG_MITER_FROM_SG (1 << 2) /* nop */
 
struct sg_mapping_iter {
/* the following three fields can be accessed directly */
struct page *page; /* currently mapped page */
void *addr; /* pointer to the mapped area */
size_t length; /* length of the mapped area */
size_t consumed; /* number of consumed bytes */
 
/* these are internal states, keep away */
struct scatterlist *__sg; /* current entry */
unsigned int __nents; /* nr of remaining entries */
unsigned int __offset; /* offset within sg */
unsigned int __flags;
};
 
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
unsigned int nents, unsigned int flags);
bool sg_miter_next(struct sg_mapping_iter *miter);
void sg_miter_stop(struct sg_mapping_iter *miter);
 
#endif /* _LINUX_SCATTERLIST_H */
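A sketch of the mapping-iterator pattern described above: zeroing a scatterlist page by page. sg_zero() is illustrative; SG_MITER_TO_SG asks for the data to be flushed back when each page is unmapped:

static void sg_zero(struct scatterlist *sgl, unsigned int nents)
{
        struct sg_mapping_iter miter;

        sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);
        while (sg_miter_next(&miter)) {
                memset(miter.addr, 0, miter.length);
                /* miter.consumed defaults to miter.length */
        }
        sg_miter_stop(&miter);
}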
/drivers/include/linux/slab.h
1,2 → 1,3
 
#include <errno.h>
// stub
/drivers/include/linux/spinlock_types.h
17,7 → 17,7
 
#include <linux/lockdep.h>
 
typedef struct {
typedef struct spinlock {
raw_spinlock_t raw_lock;
#ifdef CONFIG_GENERIC_LOCKBREAK
unsigned int break_lock;
/drivers/include/syscall.h
516,11 → 516,11
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 
static void init_rwsem(struct rw_semaphore *sem)
{
sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock);
INIT_LIST_HEAD(&sem->wait_list);
}
//static void init_rwsem(struct rw_semaphore *sem)
//{
// sem->count = RWSEM_UNLOCKED_VALUE;
// spin_lock_init(&sem->wait_lock);
// INIT_LIST_HEAD(&sem->wait_list);
//}
 
#endif
/drivers/video/drm/drm_crtc_helper.c
135,8 → 135,10
if (connector->funcs->force)
connector->funcs->force(connector);
} else {
// dbgprintf("call detect funcs %p ", connector->funcs);
// dbgprintf("detect %p\n", connector->funcs->detect);
connector->status = connector->funcs->detect(connector, true);
// drm_kms_helper_poll_enable(dev);
// dbgprintf("status %x\n", connector->status);
}
 
if (connector->status == connector_status_disconnected) {
296,7 → 298,6
crtc->fb = NULL;
}
}
 
}
EXPORT_SYMBOL(drm_helper_disable_unused_functions);
 
/drivers/video/drm/drm_fb_helper.c
61,8 → 61,6
struct drm_connector *connector;
int i;
 
ENTER();
 
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_fb_helper_connector *fb_helper_connector;
 
73,7 → 71,6
fb_helper_connector->connector = connector;
fb_helper->connector_info[fb_helper->connector_count++] = fb_helper_connector;
}
LEAVE();
return 0;
fail:
for (i = 0; i < fb_helper->connector_count; i++) {
81,7 → 78,6
fb_helper->connector_info[i] = NULL;
}
fb_helper->connector_count = 0;
FAIL();
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
194,10 → 190,6
struct drm_crtc *crtc;
int i;
 
ENTER();
 
dbgprintf("crtc_count %d max_conn_count %d\n", crtc_count, max_conn_count);
 
fb_helper->dev = dev;
 
INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
204,16 → 196,12
 
fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
if (!fb_helper->crtc_info)
{
FAIL();
return -ENOMEM;
};
 
fb_helper->crtc_count = crtc_count;
fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
if (!fb_helper->connector_info) {
kfree(fb_helper->crtc_info);
FAIL();
return -ENOMEM;
}
fb_helper->connector_count = 0;
235,11 → 223,9
i++;
}
 
LEAVE();
return 0;
out_free:
drm_fb_helper_crtc_free(fb_helper);
FAIL();
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_init);
599,8 → 585,8
if (new_fb) {
info->var.pixclock = 0;
 
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
info->fix.id);
dev_info(fb_helper->dev->dev, "fb%d: %s frame buffer device\n",
info->node, info->fix.id);
 
} else {
drm_fb_helper_set_par(info);
996,10 → 982,7
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
bool ret;
 
ENTER();
 
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(fb_helper->dev);
 
1016,8 → 999,7
 
drm_setup_crtcs(fb_helper);
 
ret = drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
LEAVE();
return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
 
/drivers/video/drm/i915/i915.map
File deleted
/drivers/video/drm/i915/Gtt/intel-agp.h
62,12 → 62,6
#define I810_PTE_LOCAL 0x00000002
#define I810_PTE_VALID 0x00000001
#define I830_PTE_SYSTEM_CACHED 0x00000006
/* GT PTE cache control fields */
#define GEN6_PTE_UNCACHED 0x00000002
#define HSW_PTE_UNCACHED 0x00000000
#define GEN6_PTE_LLC 0x00000004
#define GEN6_PTE_LLC_MLC 0x00000006
#define GEN6_PTE_GFDT 0x00000008
 
#define I810_SMRAM_MISCC 0x70
#define I810_GFX_MEM_WIN_SIZE 0x00010000
97,7 → 91,6
#define G4x_GMCH_SIZE_VT_2M (G4x_GMCH_SIZE_2M | G4x_GMCH_SIZE_VT_EN)
 
#define GFX_FLSH_CNTL 0x2170 /* 915+ */
#define GFX_FLSH_CNTL_VLV 0x101008
 
#define I810_DRAM_CTL 0x3000
#define I810_DRAM_ROW_0 0x00000001
148,29 → 141,6
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
 
#define SNB_GMCH_CTRL 0x50
#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
#define SNB_GTT_SIZE_0M (0 << 8)
#define SNB_GTT_SIZE_1M (1 << 8)
#define SNB_GTT_SIZE_2M (2 << 8)
#define SNB_GTT_SIZE_MASK (3 << 8)
 
/* pci devices ids */
#define PCI_DEVICE_ID_INTEL_E7221_HB 0x2588
#define PCI_DEVICE_ID_INTEL_E7221_IG 0x258a
219,66 → 189,5
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100 /* Desktop */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG 0x0102
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG 0x0112
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG 0x0122
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104 /* Mobile */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG 0x0106
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG 0x0116
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG 0x0126
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB 0x0108 /* Server */
#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG 0x010A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_HB 0x0150 /* Desktop */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG 0x0152
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG 0x0162
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_HB 0x0154 /* Mobile */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG 0x0156
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG 0x0166
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_HB 0x0158 /* Server */
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG 0x015A
#define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */
#define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30
#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 0x0412
#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422
#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416
#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426
#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a
#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a
#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A
#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A
#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A
#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A
 
#endif
/drivers/video/drm/i915/Gtt/intel-gtt.c
20,6 → 20,8
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
 
//#include <linux/pagemap.h>
//#include <linux/agp_backend.h>
//#include <asm/smp.h>
26,7 → 28,7
#include <linux/spinlock.h>
#include "agp.h"
#include "intel-agp.h"
#include "intel-gtt.h"
#include <drm/intel-gtt.h>
 
#include <syscall.h>
 
110,14 → 112,15
 
static int intel_gtt_setup_scratch_page(void)
{
struct page *page;
dma_addr_t dma_addr;
 
dma_addr = AllocPage();
if (dma_addr == 0)
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
intel_private.base.scratch_page_dma = page_to_phys(page);
 
intel_private.base.scratch_page_dma = dma_addr;
intel_private.scratch_page = NULL;
intel_private.scratch_page = page;
 
return 0;
}
158,62 → 161,6
stolen_size = 0;
break;
}
} else if (INTEL_GTT_GEN == 6) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
u16 snb_gmch_ctl;
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
case SNB_GMCH_GMS_STOLEN_32M:
stolen_size = MB(32);
break;
case SNB_GMCH_GMS_STOLEN_64M:
stolen_size = MB(64);
break;
case SNB_GMCH_GMS_STOLEN_96M:
stolen_size = MB(96);
break;
case SNB_GMCH_GMS_STOLEN_128M:
stolen_size = MB(128);
break;
case SNB_GMCH_GMS_STOLEN_160M:
stolen_size = MB(160);
break;
case SNB_GMCH_GMS_STOLEN_192M:
stolen_size = MB(192);
break;
case SNB_GMCH_GMS_STOLEN_224M:
stolen_size = MB(224);
break;
case SNB_GMCH_GMS_STOLEN_256M:
stolen_size = MB(256);
break;
case SNB_GMCH_GMS_STOLEN_288M:
stolen_size = MB(288);
break;
case SNB_GMCH_GMS_STOLEN_320M:
stolen_size = MB(320);
break;
case SNB_GMCH_GMS_STOLEN_352M:
stolen_size = MB(352);
break;
case SNB_GMCH_GMS_STOLEN_384M:
stolen_size = MB(384);
break;
case SNB_GMCH_GMS_STOLEN_416M:
stolen_size = MB(416);
break;
case SNB_GMCH_GMS_STOLEN_448M:
stolen_size = MB(448);
break;
case SNB_GMCH_GMS_STOLEN_480M:
stolen_size = MB(480);
break;
case SNB_GMCH_GMS_STOLEN_512M:
stolen_size = MB(512);
break;
}
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
347,29 → 294,9
 
static unsigned int intel_gtt_total_entries(void)
{
int size;
 
if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
return i965_gtt_total_entries();
else if (INTEL_GTT_GEN == 6) {
u16 snb_gmch_ctl;
 
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
default:
case SNB_GTT_SIZE_0M:
printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
size = MB(0);
break;
case SNB_GTT_SIZE_1M:
size = MB(1);
break;
case SNB_GTT_SIZE_2M:
size = MB(2);
break;
}
return size/4;
} else {
else {
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
433,11 → 360,8
 
ret = intel_private.driver->setup();
if (ret != 0)
{
return ret;
};
 
 
intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.base.gtt_total_entries = intel_gtt_total_entries();
 
457,9 → 381,6
gtt_map_size = intel_private.base.gtt_total_entries * 4;
 
intel_private.gtt = NULL;
// if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
// intel_private.gtt = ioremap_wc(intel_private.gtt_bus_addr,
// gtt_map_size);
if (intel_private.gtt == NULL)
intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
gtt_map_size);
509,9 → 430,6
{
u8 __iomem *reg;
 
if (INTEL_GTT_GEN >= 6)
return true;
 
if (INTEL_GTT_GEN == 2) {
u16 gmch_ctrl;
 
565,33 → 483,39
return false;
}
 
void intel_gtt_insert_sg_entries(struct pagelist *st,
void intel_gtt_insert_sg_entries(struct sg_table *st,
unsigned int pg_start,
unsigned int flags)
{
struct scatterlist *sg;
unsigned int len, m;
int i, j;
 
j = pg_start;
 
for(i = 0; i < st->nents; i++)
{
dma_addr_t addr = st->page[i];
/* sg may merge pages, but we have to separate
* per-page addr for GTT */
for_each_sg(st->sgl, sg, st->nents, i) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
intel_private.driver->write_entry(addr, j, flags);
j++;
};
 
}
}
readl(intel_private.gtt+j-1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
 
static void intel_gtt_insert_pages(unsigned int first_entry,
unsigned int num_entries,
dma_addr_t *pages,
struct page **pages,
unsigned int flags)
{
int i, j;
 
for (i = 0, j = first_entry; i < num_entries; i++, j++) {
dma_addr_t addr = pages[i];
dma_addr_t addr = page_to_phys(pages[i]);
intel_private.driver->write_entry(addr,
j, flags);
}
670,85 → 594,6
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static bool gen6_check_flags(unsigned int flags)
{
return true;
}
 
static void haswell_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
 
if (type_mask == AGP_USER_MEMORY)
pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
 
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
 
if (type_mask == AGP_USER_MEMORY)
pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
} else { /* set 'normal'/'cached' to LLC by default */
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
 
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
}
 
static void valleyview_write_entry(dma_addr_t addr, unsigned int entry,
unsigned int flags)
{
unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
u32 pte_flags;
 
if (type_mask == AGP_USER_MEMORY)
pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
else {
pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
if (gfdt)
pte_flags |= GEN6_PTE_GFDT;
}
 
/* gen6 has bit11-4 for physical addr bit39-32 */
addr |= (addr >> 28) & 0xff0;
writel(addr | pte_flags, intel_private.gtt + entry);
 
writel(1, intel_private.registers + GFX_FLSH_CNTL_VLV);
}
 
static void gen6_cleanup(void)
{
}
 
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
770,7 → 615,7
 
static int i9xx_setup(void)
{
u32 reg_addr;
u32 reg_addr, gtt_addr;
int size = KB(512);
 
pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);
777,35 → 622,23
 
reg_addr &= 0xfff80000;
 
if (INTEL_GTT_GEN >= 7)
size = MB(2);
 
intel_private.registers = ioremap(reg_addr, size);
if (!intel_private.registers)
return -ENOMEM;
 
if (INTEL_GTT_GEN == 3) {
u32 gtt_addr;
 
switch (INTEL_GTT_GEN) {
case 3:
pci_read_config_dword(intel_private.pcidev,
I915_PTEADDR, &gtt_addr);
intel_private.gtt_bus_addr = gtt_addr;
} else {
u32 gtt_offset;
 
switch (INTEL_GTT_GEN) {
break;
case 5:
case 6:
case 7:
gtt_offset = MB(2);
intel_private.gtt_bus_addr = reg_addr + MB(2);
break;
case 4:
default:
gtt_offset = KB(512);
intel_private.gtt_bus_addr = reg_addr + KB(512);
break;
}
intel_private.gtt_bus_addr = reg_addr + gtt_offset;
}
 
if (needs_idle_maps())
intel_private.base.do_idle_maps = 1;
875,32 → 708,6
.check_flags = i830_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver sandybridge_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = gen6_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver haswell_gtt_driver = {
.gen = 6,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = haswell_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
.chipset_flush = i9xx_chipset_flush,
};
static const struct intel_gtt_driver valleyview_gtt_driver = {
.gen = 7,
.setup = i9xx_setup,
.cleanup = gen6_cleanup,
.write_entry = valleyview_write_entry,
.dma_mask_size = 40,
.check_flags = gen6_check_flags,
};
 
/* Table to describe Intel GMCH and AGP/PCIE GART drivers. At least one of
* driver and gmch_driver must be non-null, and find_gmch will determine
963,106 → 770,6
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
"HD Graphics", &ironlake_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
"Sandybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG,
"Ivybridge", &sandybridge_gtt_driver },
{ PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG,
"ValleyView", &valleyview_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG,
"Haswell", &haswell_gtt_driver },
{ PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG,
"Haswell", &haswell_gtt_driver },
{ 0, NULL, NULL }
};
 
1107,7 → 814,7
 
intel_private.bridge_dev = bridge_pdev;
 
dbgprintf("Intel %s Chipset\n", intel_gtt_chipsets[i].name);
dev_info(&bridge_pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);
 
mask = intel_private.driver->dma_mask_size;
// if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
1127,7 → 834,7
}
EXPORT_SYMBOL(intel_gmch_probe);
 
const struct intel_gtt *intel_gtt_get(void)
struct intel_gtt *intel_gtt_get(void)
{
return &intel_private.base;
}
1141,7 → 848,5
EXPORT_SYMBOL(intel_gtt_chipset_flush);
 
 
//phys_addr_t get_bus_addr(void)
//{
// return intel_private.gma_bus_addr;
//};
MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");
/drivers/video/drm/i915/bitmap.c
6,7 → 6,9
#include "hmm.h"
#include "bitmap.h"
 
#define DRIVER_CAPS_0 HW_BIT_BLIT;
//#define DRIVER_CAPS_0 HW_BIT_BLIT;
 
#define DRIVER_CAPS_0 0
#define DRIVER_CAPS_1 0
 
struct context *context_map[256];
/drivers/video/drm/i915/i915_dma.c
106,32 → 106,6
}
 
/**
* Sets up the hardware status page for devices that need a physical address
* in the register.
*/
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
 
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
 
if (!dev_priv->status_page_dmah) {
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
 
memset((void __force __iomem *)dev_priv->status_page_dmah->vaddr,
0, PAGE_SIZE);
 
i915_write_hws_pga(dev);
 
DRM_DEBUG_DRIVER("Enabled hardware status page\n");
return 0;
}
 
/**
* Frees the hardware status page, whether it's a physical address or a virtual
* address set up by the X Server.
*/
171,7 → 145,7
 
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
if (ring->space < 0)
ring->space += ring->size;
 
455,16 → 429,16
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 0;
dev_priv->dri1.counter++;
if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
dev_priv->dri1.counter = 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
606,12 → 580,12
 
ADVANCE_LP_RING();
 
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
 
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(0);
ADVANCE_LP_RING();
}
622,10 → 596,8
 
static int i915_quiescent(struct drm_device *dev)
{
struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
 
i915_kernel_lost_context(dev);
return intel_wait_ring_idle(ring);
return intel_ring_idle(LP_RING(dev->dev_private));
}
 
static int i915_flush_ioctl(struct drm_device *dev, void *data,
779,21 → 751,21
 
DRM_DEBUG_DRIVER("\n");
 
dev_priv->counter++;
if (dev_priv->counter > 0x7FFFFFFFUL)
dev_priv->counter = 1;
dev_priv->dri1.counter++;
if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
dev_priv->dri1.counter = 1;
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
 
if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_STORE_DWORD_INDEX);
OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
OUT_RING(dev_priv->counter);
OUT_RING(dev_priv->dri1.counter);
OUT_RING(MI_USER_INTERRUPT);
ADVANCE_LP_RING();
}
 
return dev_priv->counter;
return dev_priv->dri1.counter;
}
 
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
824,7 → 796,7
 
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
}
 
return ret;
1018,6 → 990,12
case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
value = 1;
break;
case I915_PARAM_HAS_SECURE_BATCHES:
value = capable(CAP_SYS_ADMIN);
break;
case I915_PARAM_HAS_PINNED_BATCHES:
value = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
1074,7 → 1052,7
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
struct intel_ring_buffer *ring = LP_RING(dev_priv);
struct intel_ring_buffer *ring;
 
if (drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
1094,6 → 1072,7
 
DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
 
ring = LP_RING(dev_priv);
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
 
dev_priv->dri1.gfx_hws_cpu_addr =
1299,19 → 1278,7
 
info = (struct intel_device_info *) flags;
 
#if 0
/* Refuse to load on gen6+ without kms enabled. */
if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
 
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
dev->types[7] = _DRM_STAT_PRIMARY;
dev->types[8] = _DRM_STAT_SECONDARY;
dev->types[9] = _DRM_STAT_DMA;
#endif
 
dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
if (dev_priv == NULL)
return -ENOMEM;
1327,26 → 1294,14
goto free_priv;
}
 
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
ret = -EIO;
ret = i915_gem_gtt_init(dev);
if (ret)
goto put_bridge;
}
 
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
ret = -ENODEV;
goto put_gmch;
}
 
 
pci_set_master(dev->pdev);
 
/* overlay on gen2 is broken and can't address above 1G */
// if (IS_GEN2(dev))
// dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
 
/* 965GM sometimes incorrectly writes to hardware status page (HWS)
* using 32bit addressing, overwriting memory if HWS is located
1356,8 → 1311,6
* behaviour if any general state is accessed within a page above 4GB,
* which also needs to be handled carefully.
*/
// if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
// dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
 
mmio_bar = IS_GEN2(dev) ? 1 : 0;
/* Before gen4, the registers and the GTT are behind different BARs.
1382,11 → 1335,7
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
 
DRM_INFO("gtt_base_addr %x aperture_size %d\n",
dev_priv->mm.gtt_base_addr, aperture_size );
 
// i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
// aperture_size);
 
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
1419,18 → 1368,10
intel_setup_gmbus(dev);
intel_opregion_setup(dev);
 
/* Make sure the bios did its job and set up vital registers */
intel_setup_bios(dev);
 
i915_gem_load(dev);
 
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
if (ret)
goto out_gem_unload;
}
 
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
1448,6 → 1389,8
spin_lock_init(&dev_priv->rps.lock);
spin_lock_init(&dev_priv->dpio_lock);
 
mutex_init(&dev_priv->rps.hw_lock);
 
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
dev_priv->num_pipe = 3;
else if (IS_MOBILE(dev) || !IS_GEN2(dev))
1469,13 → 1412,8
}
 
/* Must be done after probing outputs */
// intel_opregion_init(dev);
// acpi_video_register();
 
// setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
// (unsigned long) dev);
 
 
if (IS_GEN5(dev))
intel_gpu_ips_init(dev_priv);
 
1547,6 → 1485,7
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
intel_fbdev_fini(dev);
intel_modeset_cleanup(dev);
cancel_work_sync(&dev_priv->console_resume_work);
 
/*
* free the memory space allocated for the child device
/drivers/video/drm/i915/i915_drv.c
33,7 → 33,6
#include "i915_drv.h"
#include "intel_drv.h"
 
 
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
384,26 → 383,36
pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
if (pch) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
int id;
unsigned short id;
id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
 
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_IBX;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
WARN_ON(!IS_GEN5(dev));
} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found CougarPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
/* PantherPoint is CPT compatible */
dev_priv->pch_type = PCH_CPT;
dev_priv->num_pch_pll = 2;
DRM_DEBUG_KMS("Found PatherPoint PCH\n");
WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
WARN_ON(!IS_HASWELL(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
dev_priv->num_pch_pll = 0;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
}
BUG_ON(dev_priv->num_pch_pll > I915_NUM_PLLS);
}
449,7 → 458,7
struct intel_device_info *intel_info =
(struct intel_device_info *) ent->driver_data;
 
if (intel_info->is_haswell || intel_info->is_valleyview)
if (intel_info->is_valleyview)
if(!i915_preliminary_hw_support) {
DRM_ERROR("Preliminary hardware support disabled\n");
return -ENODEV;
473,6 → 482,8
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct drm_device *dev;
static struct drm_driver driver;
 
int ret;
 
dev = kzalloc(sizeof(*dev), 0);
503,6 → 514,8
mutex_init(&dev->struct_mutex);
mutex_init(&dev->ctxlist_mutex);
 
dev->driver = &driver;
 
ret = i915_driver_load(dev, ent->driver_data );
 
if (ret)
609,12 → 622,40
if (reg == GEN6_GDRST)
return false;
 
switch (reg) {
case _3D_CHICKEN3:
case IVB_CHICKEN3:
case GEN7_COMMON_SLICE_CHICKEN1:
case GEN7_L3CNTLREG1:
case GEN7_L3_CHICKEN_MODE_REGISTER:
case GEN7_ROW_CHICKEN2:
case GEN7_L3SQCREG4:
case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
case GEN7_HALF_SLICE_CHICKEN1:
case GEN6_MBCTL:
case GEN6_UCGCTL2:
return false;
default:
break;
}
 
return true;
}
 
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
/* WaIssueDummyWriteToWakeupFromRC6: Issue a dummy write to wake up the
* chip from rc6 before touching it for real. MI_MODE is masked, hence
* harmless to write 0 into. */
I915_WRITE_NOTRACE(MI_MODE, 0);
}
 
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
unsigned long irqflags; \
spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
645,6 → 686,12
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
} \
if (IS_GEN5(dev_priv->dev)) \
ilk_dummy_write(dev_priv); \
if (IS_HASWELL(dev_priv->dev) && (I915_READ_NOTRACE(GEN7_ERR_INT) & ERR_INT_MMIO_UNCLAIMED)) { \
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
/drivers/video/drm/i915/i915_drv.h
33,6 → 33,7
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/scatterlist.h>
//#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
40,6 → 41,7
//#include <linux/backlight.h>
 
#include <linux/spinlock.h>
#include <linux/err.h>
 
 
/* General customization:
69,6 → 71,14
};
#define pipe_name(p) ((p) + 'A')
 
enum transcoder {
TRANSCODER_A = 0,
TRANSCODER_B,
TRANSCODER_C,
TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')
 
enum plane {
PLANE_A = 0,
PLANE_B,
104,6 → 114,12
};
#define I915_NUM_PLLS 2
 
struct intel_ddi_plls {
int spll_refcount;
int wrpll1_refcount;
int wrpll2_refcount;
};
 
/* Interface history:
*
* 1.1: Original.
127,14 → 143,8
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
 
struct mem_block {
struct mem_block *next;
struct mem_block *prev;
int start;
int size;
struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
 
 
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
181,19 → 191,24
struct intel_display_error_state;
 
struct drm_i915_error_state {
struct kref ref;
u32 eir;
u32 pgtbl_er;
u32 ier;
u32 ccid;
u32 derrmr;
u32 forcewake;
bool waiting[I915_NUM_RINGS];
u32 pipestat[I915_MAX_PIPES];
u32 tail[I915_NUM_RINGS];
u32 head[I915_NUM_RINGS];
u32 ctl[I915_NUM_RINGS];
u32 ipeir[I915_NUM_RINGS];
u32 ipehr[I915_NUM_RINGS];
u32 instdone[I915_NUM_RINGS];
u32 acthd[I915_NUM_RINGS];
u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
/* our own tracking of ring head and tail */
u32 cpu_ring_head[I915_NUM_RINGS];
254,6 → 269,7
uint32_t sprite_width, int pixel_size);
void (*update_linetime_wm)(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
void (*modeset_global_resources)(struct drm_device *dev);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
266,7 → 282,6
struct drm_crtc *crtc);
void (*fdi_link_train)(struct drm_crtc *crtc);
void (*init_clock_gating)(struct drm_device *dev);
void (*init_pch_clock_gating)(struct drm_device *dev);
int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_i915_gem_object *obj);
341,8 → 356,9
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
struct drm_device *dev;
unsigned num_pd_entries;
dma_addr_t *pt_pages;
struct page **pt_pages;
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
377,6 → 393,11
PCH_LPT, /* Lynxpoint PCH */
};
 
enum intel_sbi_destination {
SBI_ICLK,
SBI_MPHY,
};
 
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
386,7 → 407,7
 
struct intel_gmbus {
struct i2c_adapter adapter;
bool force_bit;
u32 force_bit;
u32 reg0;
u32 gpio_reg;
struct i2c_algo_bit_data bit_algo;
393,147 → 414,11
struct drm_i915_private *dev_priv;
};
 
typedef struct drm_i915_private {
struct drm_device *dev;
 
const struct intel_device_info *info;
 
int relative_constants_mode;
 
void __iomem *regs;
 
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
spinlock_t gt_lock;
 
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
 
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
 
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
 
drm_dma_handle_t *status_page_dmah;
uint32_t counter;
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
 
// struct resource mch_res;
 
atomic_t irq_received;
 
/* protects the irq masks */
spinlock_t irq_lock;
 
/* DPIO indirect register protection */
spinlock_t dpio_lock;
 
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
 
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
 
int num_pipe;
int num_pch_pll;
 
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
unsigned int stop_rings;
 
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
// struct intel_fbc_work *fbc_work;
 
struct intel_opregion opregion;
 
/* overlay */
// struct intel_overlay *overlay;
bool sprite_scaling_enabled;
 
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
 
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
 
// struct notifier_block lid_notifier;
 
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
unsigned int fsb_freq, mem_freq, is_ddr3;
 
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
 
/* Display functions */
struct drm_i915_display_funcs display;
 
/* PCH chipset type */
enum intel_pch pch_type;
 
unsigned long quirks;
 
/* Register state */
bool modeset_on_lid;
struct i915_suspend_saved_registers {
u8 saveLBB;
u32 saveDSPACNTR;
u32 saveDSPBCNTR;
u32 saveDSPARB;
u32 saveHWS;
u32 savePIPEACONF;
u32 savePIPEBCONF;
u32 savePIPEASRC;
679,10 → 564,206
u32 savePIPEB_LINK_N1;
u32 saveMCHBAR_RENDER_STANDBY;
u32 savePCH_PORT_HOTPLUG;
};
 
struct intel_gen6_power_mgmt {
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
spinlock_t lock;
 
/* The variables below and all the rps hw state are protected by
* dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
 
struct delayed_work delayed_resume_work;
 
/*
* Protects RPS/RC6 register access and PCU communication.
* Must be taken after struct_mutex if nested.
*/
struct mutex hw_lock;
};
 
struct intel_ilk_power_mgmt {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 fmax;
u8 fstart;
 
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
u8 corr;
 
int c_m;
int r_t;
 
struct drm_i915_gem_object *pwrctx;
struct drm_i915_gem_object *renderctx;
};
 
struct i915_dri1_state {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
 
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
 
uint32_t counter;
};
 
struct intel_l3_parity {
u32 *remap_info;
struct work_struct error_work;
};
 
typedef struct drm_i915_private {
struct drm_device *dev;
 
const struct intel_device_info *info;
 
int relative_constants_mode;
 
void __iomem *regs;
 
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
struct spinlock gt_lock;
 
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
 
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
 
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
 
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
 
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
 
atomic_t irq_received;
 
/* protects the irq masks */
spinlock_t irq_lock;
 
/* DPIO indirect register protection */
spinlock_t dpio_lock;
 
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
 
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
 
int num_pipe;
int num_pch_pll;
 
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
 
unsigned int stop_rings;
 
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
struct intel_fbc_work *fbc_work;
 
struct intel_opregion opregion;
 
/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
 
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
unsigned int lvds_val; /* used for checking LVDS channel mode */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
 
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
 
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
unsigned int fsb_freq, mem_freq, is_ddr3;
 
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
 
/* Display functions */
struct drm_i915_display_funcs display;
 
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
 
unsigned long quirks;
 
/* Register state */
bool modeset_on_lid;
 
struct {
/** Bridge to intel-gtt-ko */
const struct intel_gtt *gtt;
struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
709,9 → 790,8
/** PPGTT used for aliasing the PPGTT with the GTT */
struct i915_hw_ppgtt *aliasing_ppgtt;
 
u32 *l3_remap_info;
 
// struct shrinker inactive_shrinker;
bool shrinker_no_lock_stealing;
 
/**
* List of objects currently involved in rendering.
788,19 → 868,6
u32 object_count;
} mm;
 
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct {
unsigned allow_batchbuffer : 1;
u32 __iomem *gfx_hws_cpu_addr;
 
unsigned int cpp;
int back_offset;
int front_offset;
int current_page;
int page_flipping;
} dri1;
 
/* Kernel Modesetting */
 
struct sdvo_device_mapping sdvo_mappings[2];
814,6 → 881,7
wait_queue_head_t pending_flip_queue;
 
struct intel_pch_pll pch_plls[I915_NUM_PLLS];
struct intel_ddi_plls ddi_plls;
 
/* Reclocking support */
bool render_reclock_avail;
823,47 → 891,18
u16 orig_clock;
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
struct drm_connector *int_edp_connector;
 
bool mchbar_need_disable;
 
struct intel_l3_parity l3_parity;
 
/* gen6+ rps state */
struct {
struct work_struct work;
u32 pm_iir;
/* lock - irqsave spinlock that protects the work_struct and
* pm_iir. */
spinlock_t lock;
struct intel_gen6_power_mgmt rps;
 
/* The variables below and all the rps hw state are protected by
* dev->struct_mutex. */
u8 cur_delay;
u8 min_delay;
u8 max_delay;
} rps;
 
/* ilk-only ips/rps state. Everything in here is protected by the global
* mchdev_lock in intel_pm.c */
struct {
u8 cur_delay;
u8 min_delay;
u8 max_delay;
u8 fmax;
u8 fstart;
struct intel_ilk_power_mgmt ips;
 
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
u8 corr;
 
int c_m;
int r_t;
} ips;
 
enum no_fbc_reason no_fbc_reason;
 
struct drm_mm_node *compressed_fb;
874,6 → 913,12
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
 
/*
* The console may be contended at resume, but we don't
* want resume to block on it.
*/
struct work_struct console_resume_work;
 
// struct backlight_device *backlight;
 
struct drm_property *broadcast_rgb_property;
881,6 → 926,14
 
bool hw_contexts_disabled;
uint32_t hw_context_size;
 
bool fdi_rx_polarity_reversed;
 
struct i915_suspend_saved_registers regfile;
 
/* Old dri1 support infrastructure, beware the dragons ya fools entering
* here! */
struct i915_dri1_state dri1;
} drm_i915_private_t;
 
/* Iterate over initialised rings */
924,7 → 977,7
 
const struct drm_i915_gem_object_ops *ops;
 
void *mapped;
// void *mapped;
 
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
1012,8 → 1065,8
unsigned int has_global_gtt_mapping:1;
unsigned int has_dma_mapping:1;
 
dma_addr_t *allocated_pages;
struct pagelist pages;
// dma_addr_t *allocated_pages;
struct sg_table *pages;
int pages_pin_count;
 
/* prime dma-buf support */
1062,6 → 1115,7
*/
atomic_t pending_flip;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
 
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
1098,7 → 1152,7
 
struct drm_i915_file_private {
struct {
spinlock_t lock;
struct spinlock lock;
struct list_head request_list;
} mm;
struct idr context_idr;
1125,9 → 1179,17
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
(dev)->pci_device == 0x0152 || \
(dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
(dev)->pci_device == 0x0106 || \
(dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pci_device & 0xFF00) == 0x0A00)
 
/*
* The genX designation typically refers to the render engine, so render
1153,6 → 1215,9
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
 
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
 
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
1173,6 → 1238,13
 
#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)
 
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
 
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1258,6 → 1330,7
 
extern void intel_irq_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);
 
void i915_error_state_free(struct kref *error_ref);
 
1340,15 → 1413,23
void i915_gem_lastclose(struct drm_device *dev);
 
int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
static inline dma_addr_t i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
return obj->pages.page[n];
};
struct scatterlist *sg = obj->pages->sgl;
int nents = obj->pages->nents;
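/* Chunks of a chained table hold SG_MAX_SINGLE_ALLOC - 1 usable entries
 * each (the last slot is the chain pointer), so hop chunks until index n
 * lands inside the current one. */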
while (nents > SG_MAX_SINGLE_ALLOC) {
if (n < SG_MAX_SINGLE_ALLOC - 1)
break;
 
sg = sg_chain_ptr(sg + SG_MAX_SINGLE_ALLOC - 1);
n -= SG_MAX_SINGLE_ALLOC - 1;
nents -= SG_MAX_SINGLE_ALLOC - 1;
}
return sg_page(sg+n);
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
BUG_ON(obj->pages.page == NULL);
BUG_ON(obj->pages == NULL);
obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
1361,8 → 1442,7
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno);
struct intel_ring_buffer *ring);
 
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
1380,7 → 1460,7
return (int32_t)(seq1 - seq2) >= 0;
}
 
u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
extern int i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1490,7 → 1570,15
unsigned long start,
unsigned long mappable_end,
unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_gtt_fini(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
intel_gtt_chipset_flush();
}
 
 
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
unsigned alignment,
1586,11 → 1674,12
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void ironlake_init_pch_refclk(struct drm_device *dev);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1619,6 → 1708,9
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
 
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 
/drivers/video/drm/i915/i915_gem.c
30,7 → 30,6
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
//#include <linux/shmem_fs.h>
#include <linux/slab.h>
//#include <linux/swap.h>
#include <linux/pci.h>
53,21 → 52,6
 
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
static inline long IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
 
static inline void *ERR_PTR(long error)
{
return (void *) error;
}
 
static inline long PTR_ERR(const void *ptr)
{
return (long) ptr;
}
 
void
drm_gem_object_free(struct kref *kref)
{
921,12 → 905,12
* domain anymore. */
if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
i915_gem_clflush_object(obj);
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
}
}
 
if (needs_clflush_after)
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
 
return ret;
}
1389,6 → 1373,8
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
int page_count = obj->base.size / PAGE_SIZE;
struct scatterlist *sg;
int ret, i;
 
BUG_ON(obj->madv == __I915_MADV_PURGED);
1406,12 → 1392,18
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
 
for (i = 0; i < obj->pages.nents; i++)
FreePage(obj->pages.page[i]);
for_each_sg(obj->pages->sgl, sg, page_count, i) {
struct page *page = sg_page(sg);
 
DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, obj->pages.nents);
 
 
page_cache_release(page);
}
//DRM_DEBUG_KMS("%s release %d pages\n", __FUNCTION__, page_count);
obj->dirty = 0;
kfree(obj->pages.page);
 
sg_free_table(obj->pages);
kfree(obj->pages);
}
 
static int
1419,10 → 1411,7
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
 
// printf("page %x pin count %d\n",
// obj->pages.page, obj->pages_pin_count );
 
if (obj->pages.page == NULL)
if (obj->pages == NULL)
return 0;
 
BUG_ON(obj->gtt_space);
1430,10 → 1419,14
if (obj->pages_pin_count)
return -EBUSY;
 
/* ->put_pages might need to allocate memory for the bit17 swizzle
* array, hence protect them from being reaped by removing them from gtt
* lists early. */
list_del(&obj->gtt_list);
 
ops->put_pages(obj);
obj->pages.page = NULL;
obj->pages = NULL;
 
list_del(&obj->gtt_list);
if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
 
1450,43 → 1443,55
static int
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
dma_addr_t page;
int page_count, i;
struct sg_table *st;
struct scatterlist *sg;
struct page *page;
gfp_t gfp;
 
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
return -ENOMEM;
 
page_count = obj->base.size / PAGE_SIZE;
BUG_ON(obj->pages.page != NULL);
obj->pages.page = malloc(page_count * sizeof(dma_addr_t));
if (obj->pages.page == NULL)
if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
sg_free_table(st);
kfree(st);
return -ENOMEM;
}
 
for (i = 0; i < page_count; i++) {
/* Get the list of pages out of our struct file. They'll be pinned
* at this point until we release them.
*
* Fail silently without starting the shrinker
*/
for_each_sg(st->sgl, sg, page_count, i) {
page = AllocPage(); /* KolibriOS allocator; no shrinker to fall back on */
if ( page == 0 )
goto err_pages;
 
obj->pages.page[i] = page;
};
DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
obj->pages.nents = page_count;
sg_set_page(sg, page, PAGE_SIZE, 0);
}
 
obj->pages = st;
 
// if (obj->tiling_mode != I915_TILING_NONE)
// i915_gem_object_do_bit_17_swizzle(obj);
// DRM_DEBUG_KMS("%s alloc %d pages\n", __FUNCTION__, page_count);
 
return 0;
 
err_pages:
while (i--)
FreePage(obj->pages.page[i]);
 
free(obj->pages.page);
obj->pages.page = NULL;
obj->pages.nents = 0;
 
return -ENOMEM;
for_each_sg(st->sgl, sg, i, page_count)
page_cache_release(sg_page(sg));
sg_free_table(st);
kfree(st);
return -ENOMEM; /* AllocPage() yields 0 on failure, not an ERR_PTR */
}
 
/* Ensure that the associated pages are gathered from the backing storage
1503,7 → 1508,7
const struct drm_i915_gem_object_ops *ops = obj->ops;
int ret;
 
if (obj->pages.page)
if (obj->pages)
return 0;
 
BUG_ON(obj->pages_pin_count);
1518,11 → 1523,11
 
void
i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring,
u32 seqno)
struct intel_ring_buffer *ring)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 seqno = intel_ring_get_seqno(ring);
 
BUG_ON(ring == NULL);
obj->ring = ring;
1583,28 → 1588,56
WARN_ON(i915_verify_lists(dev));
}
 
static u32
i915_gem_get_seqno(struct drm_device *dev)
static int
i915_gem_handle_seqno_wrap(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
u32 seqno = dev_priv->next_seqno;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring;
int ret, i, j;
 
/* reserve 0 for non-seqno */
if (++dev_priv->next_seqno == 0)
dev_priv->next_seqno = 1;
/* The hardware uses various monotonic 32-bit counters, if we
* detect that they will wraparound we need to idle the GPU
* and reset those counters.
*/
ret = 0;
for_each_ring(ring, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ret |= ring->sync_seqno[j] != 0;
}
if (ret == 0)
return ret;
 
return seqno;
ret = i915_gpu_idle(dev);
if (ret)
return ret;
 
i915_gem_retire_requests(dev);
for_each_ring(ring, dev_priv, i) {
for (j = 0; j < ARRAY_SIZE(ring->sync_seqno); j++)
ring->sync_seqno[j] = 0;
}
 
u32
i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
return 0;
}
 
int
i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
{
if (ring->outstanding_lazy_request == 0)
ring->outstanding_lazy_request = i915_gem_get_seqno(ring->dev);
struct drm_i915_private *dev_priv = dev->dev_private;
 
return ring->outstanding_lazy_request;
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
int ret = i915_gem_handle_seqno_wrap(dev);
if (ret)
return ret;
 
dev_priv->next_seqno = 1;
}
 
*seqno = dev_priv->next_seqno++;
return 0;
}
 
int
i915_add_request(struct intel_ring_buffer *ring,
struct drm_file *file,
1613,7 → 1646,6
drm_i915_private_t *dev_priv = ring->dev->dev_private;
struct drm_i915_gem_request *request;
u32 request_ring_position;
u32 seqno;
int was_empty;
int ret;
 
1632,7 → 1664,6
if (request == NULL)
return -ENOMEM;
 
seqno = i915_gem_next_request_seqno(ring);
 
/* Record the position of the start of the request so that
* should we detect the updated seqno part-way through the
1641,15 → 1672,13
*/
request_ring_position = intel_ring_get_tail(ring);
 
ret = ring->add_request(ring, &seqno);
ret = ring->add_request(ring);
if (ret) {
kfree(request);
return ret;
}
 
trace_i915_gem_request_add(ring, seqno);
 
request->seqno = seqno;
request->seqno = intel_ring_get_seqno(ring);
request->ring = ring;
request->tail = request_ring_position;
request->emitted_jiffies = GetTimerTicks();
1674,7 → 1703,7
}
 
if (out_seqno)
*out_seqno = seqno;
*out_seqno = request->seqno;
return 0;
}
 
1759,7 → 1788,6
i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
{
uint32_t seqno;
int i;
 
if (list_empty(&ring->request_list))
return;
1768,10 → 1796,6
 
seqno = ring->get_seqno(ring, true);
 
for (i = 0; i < ARRAY_SIZE(ring->sync_seqno); i++)
if (seqno >= ring->sync_seqno[i])
ring->sync_seqno[i] = 0;
 
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
 
1891,6 → 1915,28
return 0;
}
 
/**
* i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
* @DRM_IOCTL_ARGS: standard ioctl arguments
*
* Returns 0 if successful, else an error is returned with the remaining time in
* the timeout parameter.
* -ETIME: object is still busy after timeout
* -ERESTARTSYS: signal interrupted the wait
* -ENOENT: object doesn't exist
* Also possible, but rare:
* -EAGAIN: GPU wedged
* -ENOMEM: damn
* -ENODEV: Internal IRQ fail
* -E?: The add request failed
*
* The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
* non-zero timeout parameter the wait ioctl will wait for the given number of
* nanoseconds on an object becoming unbusy. Since the wait itself does so
* without holding struct_mutex the object may become re-busied before this
* function completes. A similar but shorter race condition exists in the busy
* ioctl
*/
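A minimal userspace sketch of the ioctl documented above (illustrative only; it assumes a Linux-style libdrm environment with i915_drm.h, an open DRM fd and a valid bo handle, none of which are part of this port):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Wait up to 10 ms for a bo to go idle; a timeout of 0 would behave
 * like the busy ioctl, as the comment above notes. */
static int wait_bo_idle(int fd, uint32_t handle)
{
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = 10 * 1000 * 1000,
	};
	return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
}

On timeout the call fails with errno set to ETIME and the remaining time written back to timeout_ns.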
 
 
 
1900,6 → 1946,11
 
 
 
 
 
 
 
 
/**
* i915_gem_object_sync - sync an object to a ring.
*
1938,7 → 1989,11
 
ret = to->sync_to(to, from, seqno);
if (!ret)
from->sync_seqno[idx] = seqno;
/* We use last_read_seqno because sync_to()
* might have just caused seqno wrap under
* the radar.
*/
from->sync_seqno[idx] = obj->last_read_seqno;
 
return ret;
}
1982,7 → 2037,7
if (obj->pin_count)
return -EBUSY;
 
BUG_ON(obj->pages.page == NULL);
BUG_ON(obj->pages == NULL);
 
ret = i915_gem_object_finish_gpu(obj);
if (ret)
2021,14 → 2076,6
return 0;
}
 
static int i915_ring_idle(struct intel_ring_buffer *ring)
{
if (list_empty(&ring->active_list))
return 0;
 
return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
}
 
int i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
2041,7 → 2088,7
if (ret)
return ret;
 
ret = i915_ring_idle(ring);
ret = intel_ring_idle(ring);
if (ret)
return ret;
}
2431,7 → 2478,7
{
struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *free_space;
struct drm_mm_node *node;
u32 size, fence_size, fence_alignment, unfenced_alignment;
bool mappable, fenceable;
int ret;
2475,66 → 2522,50
if (ret)
return ret;
 
i915_gem_object_pin_pages(obj);
 
node = kzalloc(sizeof(*node), GFP_KERNEL);
if (node == NULL) {
i915_gem_object_unpin_pages(obj);
return -ENOMEM;
}
 
search_free:
if (map_and_fenceable)
free_space =
drm_mm_search_free_in_range_color(&dev_priv->mm.gtt_space,
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
0, dev_priv->mm.gtt_mappable_end);
else
free_space = drm_mm_search_free_color(&dev_priv->mm.gtt_space,
size, alignment, obj->cache_level,
false);
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level);
if (ret) {
 
if (free_space != NULL) {
if (map_and_fenceable)
obj->gtt_space =
drm_mm_get_block_range_generic(free_space,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end,
false);
else
obj->gtt_space =
drm_mm_get_block_generic(free_space,
size, alignment, obj->cache_level,
false);
}
if (obj->gtt_space == NULL) {
ret = 1; //i915_gem_evict_something(dev, size, alignment,
// map_and_fenceable);
if (ret)
i915_gem_object_unpin_pages(obj);
kfree(node);
return ret;
 
goto search_free;
}
if (WARN_ON(!i915_gem_valid_gtt_space(dev,
obj->gtt_space,
obj->cache_level))) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
if (WARN_ON(!i915_gem_valid_gtt_space(dev, node, obj->cache_level))) {
i915_gem_object_unpin_pages(obj);
drm_mm_put_block(node);
return -EINVAL;
}
 
 
ret = i915_gem_gtt_prepare_object(obj);
if (ret) {
drm_mm_put_block(obj->gtt_space);
obj->gtt_space = NULL;
i915_gem_object_unpin_pages(obj);
drm_mm_put_block(node);
return ret;
}
 
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
 
list_move_tail(&obj->gtt_list, &dev_priv->mm.bound_list);
list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
obj->gtt_offset = obj->gtt_space->start;
obj->gtt_space = node;
obj->gtt_offset = node->start;
 
fenceable =
obj->gtt_space->size == fence_size &&
(obj->gtt_space->start & (fence_alignment - 1)) == 0;
node->size == fence_size &&
(node->start & (fence_alignment - 1)) == 0;
 
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2541,6 → 2572,7
 
obj->map_and_fenceable = mappable && fenceable;
 
i915_gem_object_unpin_pages(obj);
trace_i915_gem_object_bind(obj, map_and_fenceable);
i915_gem_verify_gtt(dev);
return 0;
2553,7 → 2585,7
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
if (obj->pages.page == NULL)
if (obj->pages == NULL)
return;
 
/* If the GPU is snooping the contents of the CPU cache,
2566,7 → 2598,7
*/
if (obj->cache_level != I915_CACHE_NONE)
return;
 
#if 0
if(obj->mapped != NULL)
{
uint8_t *page_virtual;
2613,6 → 2645,8
"mfence");
}
}
#endif
 
}
 
/** Flushes the GTT write domain for the object if it's dirty. */
2652,7 → 2686,7
return;
 
i915_gem_clflush_object(obj);
intel_gtt_chipset_flush();
i915_gem_chipset_flush(obj->base.dev);
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
 
2989,11 → 3023,16
#endif
 
if (obj->gtt_space == NULL) {
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
ret = i915_gem_object_bind_to_gtt(obj, alignment,
map_and_fenceable,
nonblocking);
if (ret)
return ret;
 
if (!dev_priv->mm.aliasing_ppgtt)
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
 
if (!obj->has_global_gtt_mapping && map_and_fenceable)
3047,14 → 3086,15
goto out;
}
 
obj->user_pin_count++;
obj->pin_filp = file;
if (obj->user_pin_count == 1) {
if (obj->user_pin_count == 0) {
ret = i915_gem_object_pin(obj, args->alignment, true, false);
if (ret)
goto out;
}
 
obj->user_pin_count++;
obj->pin_filp = file;
 
/* XXX - flush the CPU caches for pinned objects
* as the X server doesn't manage domains yet
*/
3295,7 → 3335,7
i915_gem_object_put_pages(obj);
// i915_gem_object_free_mmap_offset(obj);
 
BUG_ON(obj->pages.page);
BUG_ON(obj->pages);
 
// if (obj->base.import_attach)
// drm_prime_gem_destroy(&obj->base, NULL);
3358,7 → 3398,7
if (!IS_IVYBRIDGE(dev))
return;
 
if (!dev_priv->mm.l3_remap_info)
if (!dev_priv->l3_parity.remap_info)
return;
 
misccpctl = I915_READ(GEN7_MISCCPCTL);
3367,12 → 3407,12
 
for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
u32 remap = I915_READ(GEN7_L3LOG_BASE + i);
if (remap && remap != dev_priv->mm.l3_remap_info[i/4])
if (remap && remap != dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG("0x%x was already programmed to %x\n",
GEN7_L3LOG_BASE + i, remap);
if (remap && !dev_priv->mm.l3_remap_info[i/4])
if (remap && !dev_priv->l3_parity.remap_info[i/4])
DRM_DEBUG_DRIVER("Clearing remapped register\n");
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->mm.l3_remap_info[i/4]);
I915_WRITE(GEN7_L3LOG_BASE + i, dev_priv->l3_parity.remap_info[i/4]);
}
 
/* Make sure all the writes land before disabling dop clock gating */
3402,68 → 3442,6
I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
}
 
void i915_gem_init_ppgtt(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
 
if (!dev_priv->mm.aliasing_ppgtt)
return;
 
 
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
 
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = ppgtt->pt_pages[i];
 
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
 
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
 
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
 
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
 
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
 
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
 
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
 
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
 
static bool
intel_enable_blt(struct drm_device *dev)
{
3486,7 → 3464,7
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
 
if (!intel_enable_gtt())
if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
return -EIO;
 
if (IS_HASWELL(dev) && (I915_READ(0x120010) == 1))
/drivers/video/drm/i915/i915_gem_context.c
148,7 → 148,7
struct i915_hw_context *ctx;
int ret, id;
 
ctx = kzalloc(sizeof(struct drm_i915_file_private), GFP_KERNEL);
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return ERR_PTR(-ENOMEM);
 
420,9 → 420,8
* MI_SET_CONTEXT instead of when the next seqno has completed.
*/
if (from_obj != NULL) {
u32 seqno = i915_gem_next_request_seqno(ring);
from_obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
i915_gem_object_move_to_active(from_obj, ring, seqno);
i915_gem_object_move_to_active(from_obj, ring);
/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
* whole damn pipeline, we don't need to explicitly mark the
* object dirty. The only exception is that the context must be
/drivers/video/drm/i915/i915_gem_gtt.c
22,6 → 22,8
*
*/
 
#define iowrite32(v, addr) writel((v), (addr))
 
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
32,19 → 34,67
#define AGP_USER_MEMORY (AGP_USER_TYPES)
#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
 
typedef uint32_t gtt_pte_t;
 
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
 
#define GEN6_PDE_VALID (1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
 
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
 
static inline gtt_pte_t pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
gtt_pte_t pte = GEN6_PTE_VALID;
pte |= GEN6_PTE_ADDR_ENCODE(addr);
 
switch (level) {
case I915_CACHE_LLC_MLC:
/* Haswell doesn't set L3 this way */
if (IS_HASWELL(dev))
pte |= GEN6_PTE_CACHE_LLC;
else
pte |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(dev))
pte |= HSW_PTE_UNCACHED;
else
pte |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
 
 
return pte;
}
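As an illustrative aside (self-check code, not part of the driver): GEN6_GTT_ADDR_ENCODE() folds physical address bits 39:32 into PTE bits 11:4, while bits 31:12 keep their place as the 4 KiB page frame number.

#include <assert.h>
#include <stdint.h>

static void gtt_addr_encode_example(void)
{
	uint64_t addr = 0x800000000ull;	/* bit 35 set, all others clear */
	uint32_t pte = (uint32_t)(addr | ((addr >> 28) & 0xff0));
	assert(pte == 0x80);	/* addr bit 35 lands in PTE bit 7 */
}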
 
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
uint32_t *pt_vaddr;
uint32_t scratch_pte;
gtt_pte_t *pt_vaddr;
gtt_pte_t scratch_pte;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
 
scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
 
pt_vaddr = AllocKernelSpace(4096);
 
56,7 → 106,7
if (last_pte > I915_PPGTT_PT_ENTRIES)
last_pte = I915_PPGTT_PT_ENTRIES;
 
MapPage(pt_vaddr,ppgtt->pt_pages[act_pd], 3);
MapPage(pt_vaddr,(addr_t)(ppgtt->pt_pages[act_pd]), 3);
 
for (i = first_pte; i < last_pte; i++)
pt_vaddr[i] = scratch_pte;
87,13 → 137,13
return ret;
 
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(dma_addr_t)*ppgtt->num_pd_entries,
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
goto err_ppgtt;
 
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = AllocPage();
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
if (!ppgtt->pt_pages[i])
goto err_pt_alloc;
}
128,7 → 178,7
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
 
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
 
dev_priv->mm.aliasing_ppgtt = ppgtt;
 
144,7 → 194,7
// kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
if (ppgtt->pt_pages[i])
FreePage(ppgtt->pt_pages[i]);
FreePage((addr_t)(ppgtt->pt_pages[i]));
}
kfree(ppgtt->pt_pages);
err_ppgtt:
170,43 → 220,57
 
// kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
FreePage(ppgtt->pt_pages[i]);
FreePage((addr_t)(ppgtt->pt_pages[i]));
kfree(ppgtt->pt_pages);
kfree(ppgtt);
}
 
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct pagelist *pages,
const struct sg_table *pages,
unsigned first_entry,
uint32_t pte_flags)
enum i915_cache_level cache_level)
{
uint32_t *pt_vaddr, pte;
gtt_pte_t *pt_vaddr;
unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned i, j;
unsigned i, j, m, segment_len;
dma_addr_t page_addr;
struct scatterlist *sg;
 
/* init sg walking */
sg = pages->sgl;
i = 0;
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
 
pt_vaddr = AllocKernelSpace(4096);
if( pt_vaddr == NULL)
return;
 
if( pt_vaddr != NULL)
{
while (i < pages->nents)
{
MapPage(pt_vaddr, ppgtt->pt_pages[act_pd], 3);
while (i < pages->nents) {
MapPage(pt_vaddr,(addr_t)ppgtt->pt_pages[act_pd], 3);
 
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++, i++) {
page_addr = pages->page[i];
pte = GEN6_PTE_ADDR_ENCODE(page_addr);
pt_vaddr[j] = pte | pte_flags;
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
cache_level);
 
/* grab the next page */
if (++m == segment_len) {
if (++i == pages->nents)
break;
 
sg = sg_next(sg);
segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
m = 0;
}
}
 
 
first_pte = 0;
act_pd++;
}
FreeKernelSpace(pt_vaddr);
};
}
 
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
213,29 → 277,10
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
uint32_t pte_flags = GEN6_PTE_VALID;
 
switch (cache_level) {
case I915_CACHE_LLC_MLC:
pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
break;
case I915_CACHE_LLC:
pte_flags |= GEN6_PTE_CACHE_LLC;
break;
case I915_CACHE_NONE:
if (IS_HASWELL(obj->base.dev))
pte_flags |= HSW_PTE_UNCACHED;
else
pte_flags |= GEN6_PTE_UNCACHED;
break;
default:
BUG();
}
 
i915_ppgtt_insert_sg_entries(ppgtt,
&obj->pages,
obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
pte_flags);
cache_level);
}
 
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
246,26 → 291,68
obj->base.size >> PAGE_SHIFT);
}
 
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
enum i915_cache_level cache_level)
void i915_gem_init_ppgtt(struct drm_device *dev)
{
switch (cache_level) {
case I915_CACHE_LLC_MLC:
if (INTEL_INFO(dev)->gen >= 6)
return AGP_USER_CACHED_MEMORY_LLC_MLC;
/* Older chipsets do not have this extra level of CPU
* caching, so fall through and request the PTE simply
* as cached.
*/
case I915_CACHE_LLC:
return AGP_USER_CACHED_MEMORY;
default:
case I915_CACHE_NONE:
return AGP_USER_MEMORY;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t pd_offset;
struct intel_ring_buffer *ring;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
uint32_t __iomem *pd_addr;
uint32_t pd_entry;
int i;
 
if (!dev_priv->mm.aliasing_ppgtt)
return;
 
 
pd_addr = dev_priv->mm.gtt->gtt + ppgtt->pd_offset/sizeof(uint32_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
 
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
 
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
 
writel(pd_entry, pd_addr + i);
}
readl(pd_addr);
 
pd_offset = ppgtt->pd_offset;
pd_offset /= 64; /* in cachelines, */
pd_offset <<= 16;
 
if (INTEL_INFO(dev)->gen == 6) {
uint32_t ecochk, gab_ctl, ecobits;
 
ecobits = I915_READ(GAC_ECO_BITS);
I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
 
gab_ctl = I915_READ(GAB_CTL);
I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
 
ecochk = I915_READ(GAM_ECOCHK);
I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
ECOCHK_PPGTT_CACHE64B);
I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
} else if (INTEL_INFO(dev)->gen >= 7) {
I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
/* GFX_MODE is per-ring on gen7+ */
}
 
for_each_ring(ring, dev_priv, i) {
if (INTEL_INFO(dev)->gen >= 7)
I915_WRITE(RING_MODE_GEN7(ring),
_MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
 
I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
}
}
 
static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
288,6 → 375,34
dev_priv->mm.interruptible = interruptible;
}
 
 
static void i915_ggtt_clear_range(struct drm_device *dev,
unsigned first_entry,
unsigned num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
gtt_pte_t __iomem *gtt_base = dev_priv->mm.gtt->gtt + first_entry;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
int i;
 
if (INTEL_INFO(dev)->gen < 6) {
intel_gtt_clear_range(first_entry, num_entries);
return;
}
 
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
 
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
 
 
#if 0
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
295,7 → 410,7
struct drm_i915_gem_object *obj;
 
/* First fill our portion of the GTT with scratch pages */
intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
 
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
303,30 → 418,105
i915_gem_gtt_bind_object(obj, obj->cache_level);
}
 
intel_gtt_chipset_flush();
i915_gem_chipset_flush(dev);
}
#endif
 
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
struct scatterlist *sg, *s;
unsigned int nents;
int i;
 
if (obj->has_dma_mapping)
return 0;
 
sg = obj->pages->sgl;
nents = obj->pages->nents;
 
 
WARN_ON(nents == 0 || sg[0].length == 0);
 
for_each_sg(sg, s, nents, i) {
BUG_ON(!sg_page(s));
s->dma_address = sg_phys(s);
}
 
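/* Full store fence before the GPU can walk these entries: on x86 a
 * lock-prefixed add to the stack orders stores like mfence, cheaply. */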
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
 
return 0;
}
 
/*
* Binds an object into the global gtt with the specified cache level. The object
* will be accessible to the GPU via commands whose operands reference offsets
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
gtt_pte_t __iomem *gtt_entries = dev_priv->mm.gtt->gtt + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
 
for_each_sg(st->sgl, sg, st->nents, unused) {
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
i++;
}
}
 
BUG_ON(i > max_entries);
BUG_ON(i != obj->base.size / PAGE_SIZE);
 
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR, they may still be subject
* to NUMA access patterns. Therefore, even with the way we assume
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
 
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
 
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
if (INTEL_INFO(dev)->gen < 6) {
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
flags);
} else {
gen6_ggtt_bind_object(obj, cache_level);
}
 
intel_gtt_insert_sg_entries(&obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
agp_type);
obj->has_global_gtt_mapping = 1;
}
 
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
i915_ggtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
 
obj->has_global_gtt_mapping = 0;
384,5 → 574,276
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
 
/* ... but ensure that we clear the entire range. */
intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
i915_ggtt_clear_range(dev, start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}
 
static int setup_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct page *page;
dma_addr_t dma_addr;
 
page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
if (page == NULL)
return -ENOMEM;
 
#ifdef CONFIG_INTEL_IOMMU
dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, dma_addr))
return -EINVAL;
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->mm.gtt->scratch_page = page;
dev_priv->mm.gtt->scratch_page_dma = dma_addr;
 
return 0;
}
 
 
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
return snb_gmch_ctl << 20;
}
 
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
 
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
return stolen_decoder[snb_gmch_ctl] << 20;
}
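(Worked example, field values assumed rather than read from hardware: gen6 counts stolen memory in 32 MB units, so a GMS field of 2 decodes to 2 << 25 bytes = 64 MB; gen7 instead indexes stolen_decoder[], so a field of 8 decodes to 128 << 20 bytes = 128 MB.)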
 
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
u16 snb_gmch_ctl;
int ret;
 
/* On modern platforms we need not worry ourselves with the legacy
* hostbridge query stuff. Skip it entirely.
*/
if (INTEL_INFO(dev)->gen < 6) {
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
}
 
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
return -ENODEV;
}
return 0;
}
 
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
if (!dev_priv->mm.gtt)
return -ENOMEM;
 
 
#ifdef CONFIG_INTEL_IOMMU
dev_priv->mm.gtt->needs_dmar = 1;
#endif
 
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
 
/* i9xx_setup */
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
dev_priv->mm.gtt->gtt_total_entries =
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
if (INTEL_INFO(dev)->gen < 7)
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
else
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
 
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
/* 64/512MB is the current min/max we actually know of, but this is just a
* coarse sanity check.
*/
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
DRM_ERROR("Unknown GMADR entries (%d)\n",
dev_priv->mm.gtt->gtt_mappable_entries);
ret = -ENXIO;
goto err_out;
}
 
ret = setup_scratch_page(dev);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
goto err_out;
}
 
dev_priv->mm.gtt->gtt = ioremap(gtt_bus_addr,
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
if (!dev_priv->mm.gtt->gtt) {
DRM_ERROR("Failed to map the gtt page table\n");
ret = -ENOMEM;
goto err_out;
}
 
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
 
return 0;
 
err_out:
kfree(dev_priv->mm.gtt);
return ret;
}
 
 
struct scatterlist *sg_next(struct scatterlist *sg)
{
if (sg_is_last(sg))
return NULL;
 
sg++;
if (unlikely(sg_is_chain(sg)))
sg = sg_chain_ptr(sg);
 
return sg;
}
 
 
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
sg_free_fn *free_fn)
{
struct scatterlist *sgl, *next;
 
if (unlikely(!table->sgl))
return;
 
sgl = table->sgl;
while (table->orig_nents) {
unsigned int alloc_size = table->orig_nents;
unsigned int sg_size;
 
/*
* If we have more than max_ents segments left,
* then assign 'next' to the sg table after the current one.
* sg_size is then one less than alloc size, since the last
* element is the chain pointer.
*/
if (alloc_size > max_ents) {
next = sg_chain_ptr(&sgl[max_ents - 1]);
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else {
sg_size = alloc_size;
next = NULL;
}
 
table->orig_nents -= sg_size;
kfree(sgl);
sgl = next;
}
 
table->sgl = NULL;
}
 
void sg_free_table(struct sg_table *table)
{
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
}
 
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
struct scatterlist *sg, *prv;
unsigned int left;
unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
 
#ifndef ARCH_HAS_SG_CHAIN
BUG_ON(nents > max_ents);
#endif
 
memset(table, 0, sizeof(*table));
 
left = nents;
prv = NULL;
do {
unsigned int sg_size, alloc_size = left;
 
if (alloc_size > max_ents) {
alloc_size = max_ents;
sg_size = alloc_size - 1;
} else
sg_size = alloc_size;
 
left -= sg_size;
 
sg = kmalloc(alloc_size * sizeof(struct scatterlist), gfp_mask);
if (unlikely(!sg)) {
/*
* Adjust entry count to reflect that the last
* entry of the previous table won't be used for
* linkage. Without this, sg_kfree() may get
* confused.
*/
if (prv)
table->nents = ++table->orig_nents;
 
goto err;
}
 
sg_init_table(sg, alloc_size);
table->nents = table->orig_nents += sg_size;
 
/*
* If this is the first mapping, assign the sg table header.
* If this is not the first mapping, chain previous part.
*/
if (prv)
sg_chain(prv, max_ents, sg);
else
table->sgl = sg;
 
/*
* If no more entries after this one, mark the end
*/
if (!left)
sg_mark_end(&sg[sg_size - 1]);
 
prv = sg;
} while (left);
 
return 0;
 
err:
__sg_free_table(table, SG_MAX_SINGLE_ALLOC, NULL);
 
return -ENOMEM;
}
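A minimal usage sketch of the local scatterlist helpers above (pages and npages are hypothetical placeholders): allocate a table, attach one 4 KiB page per entry, then free it.

static int build_sg_example(struct page **pages, unsigned int npages)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	if (sg_alloc_table(&st, npages, GFP_KERNEL))
		return -ENOMEM;
	/* chaining past SG_MAX_SINGLE_ALLOC entries is handled internally */
	for_each_sg(st.sgl, sg, st.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
	/* ... hand st.sgl to the GTT binding paths above ... */
	sg_free_table(&st);
	return 0;
}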
 
 
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
{
unsigned int i;
for (i = 0; i < nents; i++)
sgl[i].sg_magic = SG_MAGIC;
}
#endif
sg_mark_end(&sgl[nents - 1]);
}
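 
/* Illustrative usage of the helpers above (hypothetical caller, not part
 * of the driver; assumes GFP_KERNEL is provided by this port's headers). */
static inline int example_sg_build(unsigned int nents)
{
	struct sg_table st;
	struct scatterlist *sg;
	int ret;

	ret = sg_alloc_table(&st, nents, GFP_KERNEL);
	if (ret)
		return ret;	/* partial chunks were already freed */

	/* Chaining is transparent: sg_next() crosses chunk boundaries. */
	for (sg = st.sgl; sg != NULL; sg = sg_next(sg))
		;		/* point each entry at its page here */

	sg_free_table(&st);
	return 0;
}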
 
/drivers/video/drm/i915/i915_irq.c
28,7 → 28,6
 
#define pr_fmt(fmt) ": " fmt
 
#include <linux/irqreturn.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
40,16 → 39,7
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
 
#define DRM_IRQ_ARGS void *arg
 
static struct drm_driver {
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev);
int (*irq_postinstall) (struct drm_device *dev);
}drm_driver;
 
static struct drm_driver *driver = &drm_driver;
 
#define DRM_WAKEUP( queue ) wake_up( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
 
170,7 → 160,10
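/* With Haswell's eDP transcoder the pipe and the transcoder are no longer
 * always the same unit, so PIPECONF must be indexed through
 * intel_pipe_to_cpu_transcoder() rather than by the raw pipe number. */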
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
 
return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE;
}
 
/* Called from drm generic code, passed a 'crtc', which
260,7 → 253,7
if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0)
return;
 
mutex_lock(&dev_priv->dev->struct_mutex);
mutex_lock(&dev_priv->rps.hw_lock);
 
if (pm_iir & GEN6_PM_RP_UP_THRESHOLD)
new_delay = dev_priv->rps.cur_delay + 1;
275,7 → 268,7
gen6_set_rps(dev_priv->dev, new_delay);
}
 
mutex_unlock(&dev_priv->dev->struct_mutex);
mutex_unlock(&dev_priv->rps.hw_lock);
}
 
 
291,7 → 284,7
static void ivybridge_parity_work(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
parity_error_work);
l3_parity.error_work);
u32 error_status, row, bank, subbank;
char *parity_event[5];
uint32_t misccpctl;
355,7 → 348,7
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 
queue_work(dev_priv->wq, &dev_priv->parity_error_work);
queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
 
#endif
364,6 → 357,7
struct drm_i915_private *dev_priv,
u32 gt_iir)
{
printf("%s\n", __FUNCTION__);
 
if (gt_iir & (GEN6_RENDER_USER_INTERRUPT |
GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT))
405,10 → 399,10
POSTING_READ(GEN6_PMIMR);
spin_unlock_irqrestore(&dev_priv->rps.lock, flags);
 
queue_work(dev_priv->wq, &dev_priv->rps.work);
// queue_work(dev_priv->wq, &dev_priv->rps.work);
}
 
static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
419,6 → 413,8
u32 pipe_stats[I915_MAX_PIPES];
bool blc_event;
 
printf("%s\n", __FUNCTION__);
 
atomic_inc(&dev_priv->irq_received);
 
while (true) {
479,8 → 475,8
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
 
// if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
// gen6_queue_rps_work(dev_priv, pm_iir);
if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
gen6_queue_rps_work(dev_priv, pm_iir);
 
I915_WRITE(GTIIR, gt_iir);
I915_WRITE(GEN6_PMIIR, pm_iir);
496,6 → 492,8
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int pipe;
 
printf("%s\n", __FUNCTION__);
 
if (pch_iir & SDE_AUDIO_POWER_MASK)
DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
(pch_iir & SDE_AUDIO_POWER_MASK) >>
560,7 → 558,7
I915_READ(FDI_RX_IIR(pipe)));
}
 
static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
568,6 → 566,8
irqreturn_t ret = IRQ_NONE;
int i;
 
printf("%s\n", __FUNCTION__);
 
atomic_inc(&dev_priv->irq_received);
 
/* disable master interrupt before clearing iir */
636,14 → 636,15
notify_ring(dev, &dev_priv->ring[VCS]);
}
 
static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
 
printf("%s\n", __FUNCTION__);
 
atomic_inc(&dev_priv->irq_received);
 
/* disable master interrupt before clearing iir */
660,11 → 661,6
(!IS_GEN6(dev) || pm_iir == 0))
goto done;
 
if (HAS_PCH_CPT(dev))
hotplug_mask = SDE_HOTPLUG_MASK_CPT;
else
hotplug_mask = SDE_HOTPLUG_MASK;
 
ret = IRQ_HANDLED;
 
if (IS_GEN5(dev))
986,6 → 982,8
= I915_READ(RING_SYNC_0(ring->mmio_base));
error->semaphore_mboxes[ring->id][1]
= I915_READ(RING_SYNC_1(ring->mmio_base));
error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
}
 
if (INTEL_INFO(dev)->gen >= 4) {
1009,6 → 1007,7
error->acthd[ring->id] = intel_ring_get_active_head(ring);
error->head[ring->id] = I915_READ_HEAD(ring);
error->tail[ring->id] = I915_READ_TAIL(ring);
error->ctl[ring->id] = I915_READ_CTL(ring);
 
error->cpu_ring_head[ring->id] = ring->head;
error->cpu_ring_tail[ring->id] = ring->tail;
1103,6 → 1102,16
else
error->ier = I915_READ(IER);
 
if (INTEL_INFO(dev)->gen >= 6)
error->derrmr = I915_READ(DERRMR);
 
if (IS_VALLEYVIEW(dev))
error->forcewake = I915_READ(FORCEWAKE_VLV);
else if (INTEL_INFO(dev)->gen >= 7)
error->forcewake = I915_READ(FORCEWAKE_MT);
else if (INTEL_INFO(dev)->gen == 6)
error->forcewake = I915_READ(FORCEWAKE);
 
for_each_pipe(pipe)
error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
 
1333,7 → 1342,9
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
 
if (work == NULL || work->pending || !work->enable_stall_check) {
if (work == NULL ||
atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
!work->enable_stall_check) {
/* Either the pending flip IRQ arrived, or we're too early. Don't check */
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
1648,7 → 1659,7
/* Clear & enable PCU event interrupts */
I915_WRITE(DEIIR, DE_PCU_EVENT);
I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
// ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
}
 
return 0;
1710,6 → 1721,7
u32 enable_mask;
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
u32 render_irqs;
u16 msid;
 
enable_mask = I915_DISPLAY_PORT_INTERRUPT;
1730,11 → 1742,11
dev_priv->pipestat[1] = 0;
 
/* Hack for broken MSIs on VLV */
pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
pci_read_config_word(dev->pdev, 0x98, &msid);
msid &= 0xff; /* mask out delivery bits */
msid |= (1<<14);
pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
// pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000);
// pci_read_config_word(dev->pdev, 0x98, &msid);
// msid &= 0xff; /* mask out delivery bits */
// msid |= (1<<14);
// pci_write_config_word(dev_priv->dev->pdev, 0x98, msid);
 
I915_WRITE(VLV_IMR, dev_priv->irq_mask);
I915_WRITE(VLV_IER, enable_mask);
1749,21 → 1761,12
I915_WRITE(VLV_IIR, 0xffffffff);
I915_WRITE(VLV_IIR, 0xffffffff);
 
dev_priv->gt_irq_mask = ~0;
 
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIIR, I915_READ(GTIIR));
I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
I915_WRITE(GTIER, GT_GEN6_BLT_FLUSHDW_NOTIFY_INTERRUPT |
GT_GEN6_BLT_CS_ERROR_INTERRUPT |
GT_GEN6_BLT_USER_INTERRUPT |
GT_GEN6_BSD_USER_INTERRUPT |
GT_GEN6_BSD_CS_ERROR_INTERRUPT |
GT_GEN7_L3_PARITY_ERROR_INTERRUPT |
GT_PIPE_NOTIFY |
GT_RENDER_CS_ERROR_INTERRUPT |
GT_SYNC_STATUS |
GT_USER_INTERRUPT);
 
render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT |
GEN6_BLITTER_USER_INTERRUPT;
I915_WRITE(GTIER, render_irqs);
POSTING_READ(GTIER);
 
/* ack & enable invalid PTE error interrupts */
1781,9 → 1784,9
hotplug_en |= HDMIC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
hotplug_en |= HDMID_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOC_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915)
hotplug_en |= SDVOB_HOTPLUG_INT_EN;
if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) {
hotplug_en |= CRT_HOTPLUG_INT_EN;
1796,7 → 1799,6
return 0;
}
 
 
static void valleyview_irq_uninstall(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1886,8 → 1888,7
return 0;
}
 
 
static irqreturn_t i8xx_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2068,7 → 2069,7
return 0;
}
 
static irqreturn_t i915_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2307,7 → 2308,7
return 0;
}
 
static irqreturn_t i965_irq_handler(DRM_IRQ_ARGS)
static irqreturn_t i965_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2451,38 → 2452,49
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_VALLEYVIEW(dev)) {
driver->irq_handler = valleyview_irq_handler;
driver->irq_preinstall = valleyview_irq_preinstall;
driver->irq_postinstall = valleyview_irq_postinstall;
dev->driver->irq_handler = valleyview_irq_handler;
dev->driver->irq_preinstall = valleyview_irq_preinstall;
dev->driver->irq_postinstall = valleyview_irq_postinstall;
} else if (IS_IVYBRIDGE(dev)) {
/* Share pre & uninstall handlers with ILK/SNB */
driver->irq_handler = ivybridge_irq_handler;
driver->irq_preinstall = ironlake_irq_preinstall;
driver->irq_postinstall = ivybridge_irq_postinstall;
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
} else if (IS_HASWELL(dev)) {
/* Share interrupts handling with IVB */
driver->irq_handler = ivybridge_irq_handler;
driver->irq_preinstall = ironlake_irq_preinstall;
driver->irq_postinstall = ivybridge_irq_postinstall;
dev->driver->irq_handler = ivybridge_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ivybridge_irq_postinstall;
} else if (HAS_PCH_SPLIT(dev)) {
driver->irq_handler = ironlake_irq_handler;
driver->irq_preinstall = ironlake_irq_preinstall;
driver->irq_postinstall = ironlake_irq_postinstall;
dev->driver->irq_handler = ironlake_irq_handler;
dev->driver->irq_preinstall = ironlake_irq_preinstall;
dev->driver->irq_postinstall = ironlake_irq_postinstall;
} else {
if (INTEL_INFO(dev)->gen == 2) {
} else if (INTEL_INFO(dev)->gen == 3) {
driver->irq_handler = i915_irq_handler;
driver->irq_preinstall = i915_irq_preinstall;
driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_preinstall = i915_irq_preinstall;
dev->driver->irq_postinstall = i915_irq_postinstall;
dev->driver->irq_handler = i915_irq_handler;
} else {
driver->irq_handler = i965_irq_handler;
driver->irq_preinstall = i965_irq_preinstall;
driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_preinstall = i965_irq_preinstall;
dev->driver->irq_postinstall = i965_irq_postinstall;
dev->driver->irq_handler = i965_irq_handler;
}
}
 
printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
}
 
irqreturn_t intel_irq_handler(struct drm_device *dev)
{
 
printf("i915 irq\n");
 
// printf("device %p driver %p handler %p\n", dev, dev->driver, dev->driver->irq_handler) ;
 
return dev->driver->irq_handler(0, dev);
}
 
int drm_irq_install(struct drm_device *dev)
{
unsigned long sh_flags = 0;
2511,14 → 2523,14
DRM_DEBUG("irq=%d\n", drm_dev_to_irq(dev));
 
/* Before installing handler */
if (driver->irq_preinstall)
driver->irq_preinstall(dev);
if (dev->driver->irq_preinstall)
dev->driver->irq_preinstall(dev);
 
ret = AttachIntHandler(irq_line, driver->irq_handler, (u32)dev);
ret = AttachIntHandler(irq_line, intel_irq_handler, (u32)dev);
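	/* AttachIntHandler() is the KolibriOS interrupt primitive: it invokes
	 * its callback with only the context pointer, so the intel_irq_handler()
	 * trampoline above adapts it to the Linux-style (irq, arg) handlers. */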
 
/* After installing handler */
if (driver->irq_postinstall)
ret = driver->irq_postinstall(dev);
if (dev->driver->irq_postinstall)
ret = dev->driver->irq_postinstall(dev);
 
if (ret < 0) {
DRM_ERROR("%s\n", __FUNCTION__);
/drivers/video/drm/i915/i915_reg.h
26,6 → 26,7
#define _I915_REG_H_
 
#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
 
#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
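 
/* Worked example: _PIPE(0, _HTOTAL_A, _HTOTAL_B) yields _HTOTAL_A, and
 * _PIPE(1, ...) yields _HTOTAL_A + (_HTOTAL_B - _HTOTAL_A) = _HTOTAL_B,
 * so instance N selects the Nth register of an evenly spaced bank. */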
 
40,7 → 41,15
*/
#define INTEL_GMCH_CTRL 0x52
#define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define SNB_GMCH_CTRL 0x50
#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
#define SNB_GMCH_GGMS_MASK 0x3
#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
#define SNB_GMCH_GMS_MASK 0x1f
#define IVB_GMCH_GMS_SHIFT 4
#define IVB_GMCH_GMS_MASK 0xf
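 
/* Decode sketch for the fields above (per the SNB/IVB GMCH layout):
 *   gtt_size = ((ctl >> SNB_GMCH_GGMS_SHIFT) & SNB_GMCH_GGMS_MASK) << 20;
 *   stolen   = ((ctl >> SNB_GMCH_GMS_SHIFT)  & SNB_GMCH_GMS_MASK)  << 25;
 * i.e. GGMS counts 1 MiB units of GTT and GMS counts 32 MiB units of
 * stolen memory. */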
 
 
/* PCI config space */
 
#define HPLLCC 0xc0 /* 855 only */
105,23 → 114,6
#define GEN6_GRDOM_MEDIA (1 << 2)
#define GEN6_GRDOM_BLT (1 << 3)
 
/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
 
#define GEN6_PDE_VALID (1 << 0)
#define GEN6_PDE_LARGE_PAGE (2 << 0) /* use 32kb pages */
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
 
#define GEN6_PTE_VALID (1 << 0)
#define GEN6_PTE_UNCACHED (1 << 1)
#define HSW_PTE_UNCACHED (0)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_CACHE_BITS (3 << 1)
#define GEN6_PTE_GFDT (1 << 3)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
 
#define RING_PP_DIR_BASE(ring) ((ring)->mmio_base+0x228)
#define RING_PP_DIR_BASE_READ(ring) ((ring)->mmio_base+0x518)
#define RING_PP_DIR_DCLV(ring) ((ring)->mmio_base+0x220)
241,11 → 233,18
*/
#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
#define MI_INVALIDATE_BSD (1<<7)
#define MI_FLUSH_DW_USE_GTT (1<<2)
#define MI_FLUSH_DW_USE_PPGTT (0<<2)
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
/* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_PPGTT_HSW (1<<8)
#define MI_BATCH_NON_SECURE_HSW (1<<13)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
369,6 → 368,7
#define DPIO_PLL_MODESEL_SHIFT 24 /* 3 bits */
#define DPIO_BIAS_CURRENT_CTL_SHIFT 21 /* 3 bits, always 0x7 */
#define DPIO_PLL_REFCLK_SEL_SHIFT 16 /* 2 bits */
#define DPIO_PLL_REFCLK_SEL_MASK 3
#define DPIO_DRIVER_CTL_SHIFT 12 /* always set to 0x8 */
#define DPIO_CLK_BIAS_CTL_SHIFT 8 /* always set to 0x5 */
#define _DPIO_REFSFR_B 0x8034
384,6 → 384,9
 
#define DPIO_FASTCLK_DISABLE 0x8100
 
#define DPIO_DATA_CHANNEL1 0x8220
#define DPIO_DATA_CHANNEL2 0x8420
 
/*
* Fence registers
*/
509,11 → 512,14
#define GEN7_ERR_INT 0x44040
#define ERR_INT_MMIO_UNCLAIMED (1<<13)
 
#define DERRMR 0x44050
 
/* GM45+ chicken bits -- debug workaround bits that may be required
* for various sorts of correct behavior. The top 16 bits of each are
* the enables for writing to the corresponding low bit.
*/
#define _3D_CHICKEN 0x02084
#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 0x0208c
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
521,14 → 527,17
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 0x02090
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
 
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
 
#define GEN6_GT_MODE 0x20d0
#define GEN6_GT_MODE_HI (1 << 9)
#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
 
#define GFX_MODE 0x02520
#define GFX_MODE_GEN7 0x0229c
547,6 → 556,8
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE 0x182060
#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
661,6 → 672,7
#define MI_ARB_DISPLAY_PRIORITY_B_A (1 << 0) /* display B > display A */
 
#define CACHE_MODE_0 0x02120 /* 915+ only */
#define CM0_PIPELINED_RENDER_FLUSH_DISABLE (1<<8)
#define CM0_IZ_OPT_DISABLE (1<<6)
#define CM0_ZR_OPT_DISABLE (1<<5)
#define CM0_STC_EVICT_DISABLE_LRA_SNB (1<<5)
670,6 → 682,8
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
#define GFX_FLSH_CNTL_GEN6 0x101008
#define GFX_FLSH_CNTL_EN (1<<0)
#define ECOSKPD 0x021d0
#define ECO_GATING_CX_ONLY (1<<3)
#define ECO_FLIP_DONE (1<<0)
1559,14 → 1573,14
#define _VSYNCSHIFT_B 0x61028
 
 
#define HTOTAL(pipe) _PIPE(pipe, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(pipe) _PIPE(pipe, _HBLANK_A, _HBLANK_B)
#define HSYNC(pipe) _PIPE(pipe, _HSYNC_A, _HSYNC_B)
#define VTOTAL(pipe) _PIPE(pipe, _VTOTAL_A, _VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, _VBLANK_A, _VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, _VSYNC_A, _VSYNC_B)
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
#define HBLANK(trans) _TRANSCODER(trans, _HBLANK_A, _HBLANK_B)
#define HSYNC(trans) _TRANSCODER(trans, _HSYNC_A, _HSYNC_B)
#define VTOTAL(trans) _TRANSCODER(trans, _VTOTAL_A, _VTOTAL_B)
#define VBLANK(trans) _TRANSCODER(trans, _VBLANK_A, _VBLANK_B)
#define VSYNC(trans) _TRANSCODER(trans, _VSYNC_A, _VSYNC_B)
#define BCLRPAT(pipe) _PIPE(pipe, _BCLRPAT_A, _BCLRPAT_B)
#define VSYNCSHIFT(pipe) _PIPE(pipe, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
#define VSYNCSHIFT(trans) _TRANSCODER(trans, _VSYNCSHIFT_A, _VSYNCSHIFT_B)
 
/* VGA port control */
#define ADPA 0x61100
2641,6 → 2655,7
#define PIPECONF_GAMMA (1<<24)
#define PIPECONF_FORCE_BORDER (1<<25)
#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_INTERLACE_MASK_HSW (3 << 21)
/* Note that pre-gen3 does not support interlaced display directly. Panel
* fitting must be disabled on pre-ilk for interlaced. */
#define PIPECONF_PROGRESSIVE (0 << 21)
2711,7 → 2726,7
#define PIPE_12BPC (3 << 5)
 
#define PIPESRC(pipe) _PIPE(pipe, _PIPEASRC, _PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, _PIPEACONF, _PIPEBCONF)
#define PIPECONF(tran) _TRANSCODER(tran, _PIPEACONF, _PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, _PIPEADSL, _PIPEBDSL)
#define PIPEFRAME(pipe) _PIPE(pipe, _PIPEAFRAMEHIGH, _PIPEBFRAMEHIGH)
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
2998,12 → 3013,19
#define DISPPLANE_GAMMA_ENABLE (1<<30)
#define DISPPLANE_GAMMA_DISABLE 0
#define DISPPLANE_PIXFORMAT_MASK (0xf<<26)
#define DISPPLANE_YUV422 (0x0<<26)
#define DISPPLANE_8BPP (0x2<<26)
#define DISPPLANE_15_16BPP (0x4<<26)
#define DISPPLANE_16BPP (0x5<<26)
#define DISPPLANE_32BPP_NO_ALPHA (0x6<<26)
#define DISPPLANE_32BPP (0x7<<26)
#define DISPPLANE_32BPP_30BIT_NO_ALPHA (0xa<<26)
#define DISPPLANE_BGRA555 (0x3<<26)
#define DISPPLANE_BGRX555 (0x4<<26)
#define DISPPLANE_BGRX565 (0x5<<26)
#define DISPPLANE_BGRX888 (0x6<<26)
#define DISPPLANE_BGRA888 (0x7<<26)
#define DISPPLANE_RGBX101010 (0x8<<26)
#define DISPPLANE_RGBA101010 (0x9<<26)
#define DISPPLANE_BGRX101010 (0xa<<26)
#define DISPPLANE_RGBX161616 (0xc<<26)
#define DISPPLANE_RGBX888 (0xe<<26)
#define DISPPLANE_RGBA888 (0xf<<26)
#define DISPPLANE_STEREO_ENABLE (1<<25)
#define DISPPLANE_STEREO_DISABLE 0
#define DISPPLANE_SEL_PIPE_SHIFT 24
3024,6 → 3046,8
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
 
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
3033,6 → 3057,8
#define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
#define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
#define DSPLINOFF(plane) DSPADDR(plane)
#define DSPOFFSET(plane) _PIPE(plane, _DSPAOFFSET, _DSPBOFFSET)
#define DSPSURFLIVE(plane) _PIPE(plane, _DSPASURFLIVE, _DSPBSURFLIVE)
 
/* Display/Sprite base address macros */
#define DISP_BASEADDR_MASK (0xfffff000)
3078,6 → 3104,8
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
#define _DSPBOFFSET 0x711A4
#define _DSPBSURFLIVE 0x711AC
 
/* Sprite A control */
#define _DVSACNTR 0x72180
3143,6 → 3171,7
#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
#define DVSSURFLIVE(pipe) _PIPE(pipe, _DVSASURFLIVE, _DVSBSURFLIVE)
 
#define _SPRA_CTL 0x70280
#define SPRITE_ENABLE (1<<31)
3177,6 → 3206,8
#define _SPRA_SURF 0x7029c
#define _SPRA_KEYMAX 0x702a0
#define _SPRA_TILEOFF 0x702a4
#define _SPRA_OFFSET 0x702a4
#define _SPRA_SURFLIVE 0x702ac
#define _SPRA_SCALE 0x70304
#define SPRITE_SCALE_ENABLE (1<<31)
#define SPRITE_FILTER_MASK (3<<29)
3197,6 → 3228,8
#define _SPRB_SURF 0x7129c
#define _SPRB_KEYMAX 0x712a0
#define _SPRB_TILEOFF 0x712a4
#define _SPRB_OFFSET 0x712a4
#define _SPRB_SURFLIVE 0x712ac
#define _SPRB_SCALE 0x71304
#define _SPRB_GAMC 0x71400
 
3210,8 → 3243,10
#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
#define SPROFFSET(pipe) _PIPE(pipe, _SPRA_OFFSET, _SPRB_OFFSET)
#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
#define SPRSURFLIVE(pipe) _PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE)
 
/* VBIOS regs */
#define VGACNTRL 0x71400
3246,12 → 3281,6
#define DISPLAY_PORT_PLL_BIOS_1 0x46010
#define DISPLAY_PORT_PLL_BIOS_2 0x46014
 
#define PCH_DSPCLK_GATE_D 0x42020
# define DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
# define DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
# define DPFDUNIT_CLOCK_GATE_DISABLE (1 << 7)
# define DPARBUNIT_CLOCK_GATE_DISABLE (1 << 5)
 
#define PCH_3DCGDIS0 0x46020
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
3301,14 → 3330,14
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
 
#define PIPE_DATA_M1(pipe) _PIPE(pipe, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(pipe) _PIPE(pipe, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
#define PIPE_DATA_M2(pipe) _PIPE(pipe, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
#define PIPE_DATA_N2(pipe) _PIPE(pipe, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
#define PIPE_LINK_M1(pipe) _PIPE(pipe, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
#define PIPE_LINK_N1(pipe) _PIPE(pipe, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
#define PIPE_LINK_M2(pipe) _PIPE(pipe, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
#define PIPE_LINK_N2(pipe) _PIPE(pipe, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
#define PIPE_DATA_M2(tran) _TRANSCODER(tran, _PIPEA_DATA_M2, _PIPEB_DATA_M2)
#define PIPE_DATA_N2(tran) _TRANSCODER(tran, _PIPEA_DATA_N2, _PIPEB_DATA_N2)
#define PIPE_LINK_M1(tran) _TRANSCODER(tran, _PIPEA_LINK_M1, _PIPEB_LINK_M1)
#define PIPE_LINK_N1(tran) _TRANSCODER(tran, _PIPEA_LINK_N1, _PIPEB_LINK_N1)
#define PIPE_LINK_M2(tran) _TRANSCODER(tran, _PIPEA_LINK_M2, _PIPEB_LINK_M2)
#define PIPE_LINK_N2(tran) _TRANSCODER(tran, _PIPEA_LINK_N2, _PIPEB_LINK_N2)
 
/* CPU panel fitter */
/* IVB+ has 3 fitters, 0 is 7x5 capable, the other two only 3x3 */
3315,6 → 3344,8
#define _PFA_CTL_1 0x68080
#define _PFB_CTL_1 0x68880
#define PF_ENABLE (1<<31)
#define PF_PIPE_SEL_MASK_IVB (3<<29)
#define PF_PIPE_SEL_IVB(pipe) ((pipe)<<29)
#define PF_FILTER_MASK (3<<23)
#define PF_FILTER_PROGRAMMED (0<<23)
#define PF_FILTER_MED_3x3 (1<<23)
3423,15 → 3454,13
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
#define ILK_DSPCLK_GATE 0x42020
#define IVB_VRHUNIT_CLK_GATE (1<<28)
#define ILK_DPARB_CLK_GATE (1<<5)
#define ILK_DPFD_CLK_GATE (1<<7)
 
/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */
#define ILK_CLK_FBC (1<<7)
#define ILK_DPFC_DIS1 (1<<8)
#define ILK_DPFC_DIS2 (1<<9)
#define ILK_DSPCLK_GATE_D 0x42020
#define ILK_VRHUNIT_CLOCK_GATE_DISABLE (1 << 28)
#define ILK_DPFCUNIT_CLOCK_GATE_DISABLE (1 << 9)
#define ILK_DPFCRUNIT_CLOCK_GATE_DISABLE (1 << 8)
#define ILK_DPFDUNIT_CLOCK_GATE_ENABLE (1 << 7)
#define ILK_DPARBUNIT_CLOCK_GATE_ENABLE (1 << 5)
 
#define IVB_CHICKEN3 0x4200c
# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
3447,14 → 3476,21
 
#define GEN7_L3CNTLREG1 0xB01C
#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
#define GEN7_L3AGDIS (1<<19)
 
#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
 
#define GEN7_L3SQCREG4 0xb034
#define L3SQ_URB_READ_CAM_MATCH_DISABLE (1<<27)
 
/* WaCatErrorRejectionIssue */
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
 
#define HSW_FUSE_STRAP 0x42014
#define HSW_CDCLK_LIMIT (1 << 24)
 
/* PCH */
 
/* south display engine interrupt: IBX */
3686,7 → 3722,7
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
 
#define VLV_VIDEO_DIP_CTL_A 0x60220
#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
 
3795,17 → 3831,25
#define TRANS_6BPC (2<<5)
#define TRANS_12BPC (3<<5)
 
#define _TRANSA_CHICKEN1 0xf0060
#define _TRANSB_CHICKEN1 0xf1060
#define TRANS_CHICKEN1(pipe) _PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE (1<<4)
#define _TRANSA_CHICKEN2 0xf0064
#define _TRANSB_CHICKEN2 0xf1064
#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31)
#define TRANS_CHICKEN2_TIMING_OVERRIDE (1<<31)
 
 
#define SOUTH_CHICKEN1 0xc2000
#define FDIA_PHASE_SYNC_SHIFT_OVR 19
#define FDIA_PHASE_SYNC_SHIFT_EN 18
#define FDI_PHASE_SYNC_OVR(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
#define FDI_PHASE_SYNC_EN(pipe) (1<<(FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
#define FDI_BC_BIFURCATION_SELECT (1 << 12)
#define SOUTH_CHICKEN2 0xc2004
#define FDI_MPHY_IOSFSB_RESET_STATUS (1<<13)
#define FDI_MPHY_IOSFSB_RESET_CTL (1<<12)
#define DPLS_EDP_PPS_FIX_DIS (1<<0)
 
#define _FDI_RXA_CHICKEN 0xc200c
3816,6 → 3860,7
 
#define SOUTH_DSPCLK_GATE_D 0xc2020
#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
#define PCH_LP_PARTITION_LEVEL_DISABLE (1<<12)
 
/* CPU: FDI_TX */
#define _FDI_TXA_CTL 0x60100
3877,6 → 3922,7
#define FDI_FS_ERRC_ENABLE (1<<27)
#define FDI_FE_ERRC_ENABLE (1<<26)
#define FDI_DP_PORT_WIDTH_X8 (7<<19)
#define FDI_RX_POLARITY_REVERSED_LPT (1<<16)
#define FDI_8BPC (0<<16)
#define FDI_10BPC (1<<16)
#define FDI_6BPC (2<<16)
3903,14 → 3949,19
 
#define _FDI_RXA_MISC 0xf0010
#define _FDI_RXB_MISC 0xf1010
#define FDI_RX_PWRDN_LANE1_MASK (3<<26)
#define FDI_RX_PWRDN_LANE1_VAL(x) ((x)<<26)
#define FDI_RX_PWRDN_LANE0_MASK (3<<24)
#define FDI_RX_PWRDN_LANE0_VAL(x) ((x)<<24)
#define FDI_RX_TP1_TO_TP2_48 (2<<20)
#define FDI_RX_TP1_TO_TP2_64 (3<<20)
#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
 
#define _FDI_RXA_TUSIZE1 0xf0030
#define _FDI_RXA_TUSIZE2 0xf0038
#define _FDI_RXB_TUSIZE1 0xf1030
#define _FDI_RXB_TUSIZE2 0xf1038
#define FDI_RX_TP1_TO_TP2_48 (2<<20)
#define FDI_RX_TP1_TO_TP2_64 (3<<20)
#define FDI_RX_FDI_DELAY_90 (0x90<<0)
#define FDI_RX_MISC(pipe) _PIPE(pipe, _FDI_RXA_MISC, _FDI_RXB_MISC)
#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE1, _FDI_RXB_TUSIZE1)
#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, _FDI_RXA_TUSIZE2, _FDI_RXB_TUSIZE2)
 
4003,6 → 4054,11
#define PANEL_LIGHT_ON_DELAY_SHIFT 0
 
#define PCH_PP_OFF_DELAYS 0xc720c
#define PANEL_POWER_PORT_SELECT_MASK (0x3 << 30)
#define PANEL_POWER_PORT_LVDS (0 << 30)
#define PANEL_POWER_PORT_DP_A (1 << 30)
#define PANEL_POWER_PORT_DP_C (2 << 30)
#define PANEL_POWER_PORT_DP_D (3 << 30)
#define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000)
#define PANEL_POWER_DOWN_DELAY_SHIFT 16
#define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff)
4050,7 → 4106,7
#define TRANS_DP_CTL_A 0xe0300
#define TRANS_DP_CTL_B 0xe1300
#define TRANS_DP_CTL_C 0xe2300
#define TRANS_DP_CTL(pipe) (TRANS_DP_CTL_A + (pipe) * 0x01000)
#define TRANS_DP_CTL(pipe) _PIPE(pipe, TRANS_DP_CTL_A, TRANS_DP_CTL_B)
#define TRANS_DP_OUTPUT_ENABLE (1<<31)
#define TRANS_DP_PORT_SEL_B (0<<29)
#define TRANS_DP_PORT_SEL_C (1<<29)
4108,6 → 4164,8
#define FORCEWAKE_ACK_HSW 0x130044
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_KERNEL 0x1
#define FORCEWAKE_USER 0x2
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
4220,6 → 4278,10
#define GEN6_READ_OC_PARAMS 0xc
#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
/* RC6 VID encoding: mV = vids * 5 + 245, so the encode must subtract
 * before dividing (the old form had the parentheses misplaced). */
#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
 
4251,6 → 4313,15
#define GEN7_L3LOG_BASE 0xB070
#define GEN7_L3LOG_SIZE 0x80
 
#define GEN7_HALF_SLICE_CHICKEN1 0xe100 /* IVB GT1 + VLV */
#define GEN7_HALF_SLICE_CHICKEN1_GT2 0xf100
#define GEN7_MAX_PS_THREAD_DEP (8<<12)
#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
 
#define GEN7_ROW_CHICKEN2 0xe4f4
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
 
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
4380,33 → 4451,39
#define HSW_PWR_WELL_CTL6 0x45414
 
/* Per-pipe DDI Function Control */
#define PIPE_DDI_FUNC_CTL_A 0x60400
#define PIPE_DDI_FUNC_CTL_B 0x61400
#define PIPE_DDI_FUNC_CTL_C 0x62400
#define PIPE_DDI_FUNC_CTL_EDP 0x6F400
#define DDI_FUNC_CTL(pipe) _PIPE(pipe, PIPE_DDI_FUNC_CTL_A, \
PIPE_DDI_FUNC_CTL_B)
#define PIPE_DDI_FUNC_ENABLE (1<<31)
#define TRANS_DDI_FUNC_CTL_A 0x60400
#define TRANS_DDI_FUNC_CTL_B 0x61400
#define TRANS_DDI_FUNC_CTL_C 0x62400
#define TRANS_DDI_FUNC_CTL_EDP 0x6F400
#define TRANS_DDI_FUNC_CTL(tran) _TRANSCODER(tran, TRANS_DDI_FUNC_CTL_A, \
TRANS_DDI_FUNC_CTL_B)
#define TRANS_DDI_FUNC_ENABLE (1<<31)
/* Those bits are ignored by pipe EDP since it can only connect to DDI A */
#define PIPE_DDI_PORT_MASK (7<<28)
#define PIPE_DDI_SELECT_PORT(x) ((x)<<28)
#define PIPE_DDI_MODE_SELECT_MASK (7<<24)
#define PIPE_DDI_MODE_SELECT_HDMI (0<<24)
#define PIPE_DDI_MODE_SELECT_DVI (1<<24)
#define PIPE_DDI_MODE_SELECT_DP_SST (2<<24)
#define PIPE_DDI_MODE_SELECT_DP_MST (3<<24)
#define PIPE_DDI_MODE_SELECT_FDI (4<<24)
#define PIPE_DDI_BPC_MASK (7<<20)
#define PIPE_DDI_BPC_8 (0<<20)
#define PIPE_DDI_BPC_10 (1<<20)
#define PIPE_DDI_BPC_6 (2<<20)
#define PIPE_DDI_BPC_12 (3<<20)
#define PIPE_DDI_PVSYNC (1<<17)
#define PIPE_DDI_PHSYNC (1<<16)
#define PIPE_DDI_BFI_ENABLE (1<<4)
#define PIPE_DDI_PORT_WIDTH_X1 (0<<1)
#define PIPE_DDI_PORT_WIDTH_X2 (1<<1)
#define PIPE_DDI_PORT_WIDTH_X4 (3<<1)
#define TRANS_DDI_PORT_MASK (7<<28)
#define TRANS_DDI_SELECT_PORT(x) ((x)<<28)
#define TRANS_DDI_PORT_NONE (0<<28)
#define TRANS_DDI_MODE_SELECT_MASK (7<<24)
#define TRANS_DDI_MODE_SELECT_HDMI (0<<24)
#define TRANS_DDI_MODE_SELECT_DVI (1<<24)
#define TRANS_DDI_MODE_SELECT_DP_SST (2<<24)
#define TRANS_DDI_MODE_SELECT_DP_MST (3<<24)
#define TRANS_DDI_MODE_SELECT_FDI (4<<24)
#define TRANS_DDI_BPC_MASK (7<<20)
#define TRANS_DDI_BPC_8 (0<<20)
#define TRANS_DDI_BPC_10 (1<<20)
#define TRANS_DDI_BPC_6 (2<<20)
#define TRANS_DDI_BPC_12 (3<<20)
#define TRANS_DDI_PVSYNC (1<<17)
#define TRANS_DDI_PHSYNC (1<<16)
#define TRANS_DDI_EDP_INPUT_MASK (7<<12)
#define TRANS_DDI_EDP_INPUT_A_ON (0<<12)
#define TRANS_DDI_EDP_INPUT_A_ONOFF (4<<12)
#define TRANS_DDI_EDP_INPUT_B_ONOFF (5<<12)
#define TRANS_DDI_EDP_INPUT_C_ONOFF (6<<12)
#define TRANS_DDI_BFI_ENABLE (1<<4)
#define TRANS_DDI_PORT_WIDTH_X1 (0<<1)
#define TRANS_DDI_PORT_WIDTH_X2 (1<<1)
#define TRANS_DDI_PORT_WIDTH_X4 (3<<1)
 
/* DisplayPort Transport Control */
#define DP_TP_CTL_A 0x64040
4420,12 → 4497,16
#define DP_TP_CTL_LINK_TRAIN_MASK (7<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT1 (0<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT2 (1<<8)
#define DP_TP_CTL_LINK_TRAIN_PAT3 (4<<8)
#define DP_TP_CTL_LINK_TRAIN_IDLE (2<<8)
#define DP_TP_CTL_LINK_TRAIN_NORMAL (3<<8)
#define DP_TP_CTL_SCRAMBLE_DISABLE (1<<7)
 
/* DisplayPort Transport Status */
#define DP_TP_STATUS_A 0x64044
#define DP_TP_STATUS_B 0x64144
#define DP_TP_STATUS(port) _PORT(port, DP_TP_STATUS_A, DP_TP_STATUS_B)
#define DP_TP_STATUS_IDLE_DONE (1<<25)
#define DP_TP_STATUS_AUTOTRAIN_DONE (1<<12)
 
/* DDI Buffer Control */
4444,6 → 4525,7
#define DDI_BUF_EMP_800MV_3_5DB_HSW (8<<24) /* Sel8 */
#define DDI_BUF_EMP_MASK (0xf<<24)
#define DDI_BUF_IS_IDLE (1<<7)
#define DDI_A_4_LANES (1<<4)
#define DDI_PORT_WIDTH_X1 (0<<1)
#define DDI_PORT_WIDTH_X2 (1<<1)
#define DDI_PORT_WIDTH_X4 (3<<1)
4460,6 → 4542,10
#define SBI_ADDR 0xC6000
#define SBI_DATA 0xC6004
#define SBI_CTL_STAT 0xC6008
#define SBI_CTL_DEST_ICLK (0x0<<16)
#define SBI_CTL_DEST_MPHY (0x1<<16)
#define SBI_CTL_OP_IORD (0x2<<8)
#define SBI_CTL_OP_IOWR (0x3<<8)
#define SBI_CTL_OP_CRRD (0x6<<8)
#define SBI_CTL_OP_CRWR (0x7<<8)
#define SBI_RESPONSE_FAIL (0x1<<1)
4477,10 → 4563,12
#define SBI_SSCDIVINTPHASE_PROPAGATE (1<<0)
#define SBI_SSCCTL 0x020c
#define SBI_SSCCTL6 0x060C
#define SBI_SSCCTL_PATHALT (1<<3)
#define SBI_SSCCTL_DISABLE (1<<0)
#define SBI_SSCAUXDIV6 0x0610
#define SBI_SSCAUXDIV_FINALDIV2SEL(x) ((x)<<4)
#define SBI_DBUFF0 0x2a00
#define SBI_DBUFF0_ENABLE (1<<0)
 
/* LPT PIXCLK_GATE */
#define PIXCLK_GATE 0xC6020
4490,8 → 4578,8
/* SPLL */
#define SPLL_CTL 0x46020
#define SPLL_PLL_ENABLE (1<<31)
#define SPLL_PLL_SCC (1<<28)
#define SPLL_PLL_NON_SCC (2<<28)
#define SPLL_PLL_SSC (1<<28)
#define SPLL_PLL_NON_SSC (2<<28)
#define SPLL_PLL_FREQ_810MHz (0<<26)
#define SPLL_PLL_FREQ_1350MHz (1<<26)
 
4500,7 → 4588,7
#define WRPLL_CTL2 0x46060
#define WRPLL_PLL_ENABLE (1<<31)
#define WRPLL_PLL_SELECT_SSC (0x01<<28)
#define WRPLL_PLL_SELECT_NON_SCC (0x02<<28)
#define WRPLL_PLL_SELECT_NON_SSC (0x02<<28)
#define WRPLL_PLL_SELECT_LCPLL_2700 (0x03<<28)
/* WRPLL divider programming */
#define WRPLL_DIVIDER_REFERENCE(x) ((x)<<0)
4517,21 → 4605,36
#define PORT_CLK_SEL_SPLL (3<<29)
#define PORT_CLK_SEL_WRPLL1 (4<<29)
#define PORT_CLK_SEL_WRPLL2 (5<<29)
#define PORT_CLK_SEL_NONE (7<<29)
 
/* Pipe clock selection */
#define PIPE_CLK_SEL_A 0x46140
#define PIPE_CLK_SEL_B 0x46144
#define PIPE_CLK_SEL(pipe) _PIPE(pipe, PIPE_CLK_SEL_A, PIPE_CLK_SEL_B)
/* For each pipe, we need to select the corresponding port clock */
#define PIPE_CLK_SEL_DISABLED (0x0<<29)
#define PIPE_CLK_SEL_PORT(x) ((x+1)<<29)
/* Transcoder clock selection */
#define TRANS_CLK_SEL_A 0x46140
#define TRANS_CLK_SEL_B 0x46144
#define TRANS_CLK_SEL(tran) _TRANSCODER(tran, TRANS_CLK_SEL_A, TRANS_CLK_SEL_B)
/* For each transcoder, we need to select the corresponding port clock */
#define TRANS_CLK_SEL_DISABLED (0x0<<29)
#define TRANS_CLK_SEL_PORT(x) ((x+1)<<29)
 
#define _TRANSA_MSA_MISC 0x60410
#define _TRANSB_MSA_MISC 0x61410
#define TRANS_MSA_MISC(tran) _TRANSCODER(tran, _TRANSA_MSA_MISC, \
_TRANSB_MSA_MISC)
#define TRANS_MSA_SYNC_CLK (1<<0)
#define TRANS_MSA_6_BPC (0<<5)
#define TRANS_MSA_8_BPC (1<<5)
#define TRANS_MSA_10_BPC (2<<5)
#define TRANS_MSA_12_BPC (3<<5)
#define TRANS_MSA_16_BPC (4<<5)
 
/* LCPLL Control */
#define LCPLL_CTL 0x130040
#define LCPLL_PLL_DISABLE (1<<31)
#define LCPLL_PLL_LOCK (1<<30)
#define LCPLL_CLK_FREQ_MASK (3<<26)
#define LCPLL_CLK_FREQ_450 (0<<26)
#define LCPLL_CD_CLOCK_DISABLE (1<<25)
#define LCPLL_CD2X_CLOCK_DISABLE (1<<23)
#define LCPLL_CD_SOURCE_FCLK (1<<21)
 
/* Pipe WM_LINETIME - watermark line time */
#define PIPE_WM_LINETIME_A 0x45270
/drivers/video/drm/i915/i915_trace.h
20,5 → 20,7
#define trace_i915_gem_request_wait_end(a, b)
#define trace_i915_gem_request_complete(a, b)
#define trace_intel_gpu_freq_change(a)
#define trace_i915_reg_rw(a, b, c, d)
#define trace_i915_ring_wait_begin(a)
 
#endif
/drivers/video/drm/i915/intel_bios.c
735,7 → 735,8
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* Set the Panel Power On/Off timings if uninitialized. */
if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
if (!HAS_PCH_SPLIT(dev) &&
I915_READ(PP_ON_DELAYS) == 0 && I915_READ(PP_OFF_DELAYS) == 0) {
/* Set T2 to 40ms and T5 to 200ms */
I915_WRITE(PP_ON_DELAYS, 0x019007d0);
 
/drivers/video/drm/i915/intel_crt.c
197,6 → 197,11
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
 
/* The FDI receiver on LPT only supports 8bpc and only has 2 lanes. */
if (HAS_PCH_LPT(dev) &&
(ironlake_get_lanes_required(mode->clock, 270000, 24) > 2))
return MODE_CLOCK_HIGH;
 
return MODE_OK;
}
 
220,7 → 225,11
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
 
if (HAS_PCH_SPLIT(dev))
adpa = ADPA_HOTPLUG_BITS;
else
adpa = 0;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
227,7 → 236,9
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
/* For CPT allow 3 pipe config, for others just use A or B */
if (HAS_PCH_CPT(dev))
if (HAS_PCH_LPT(dev))
; /* Those bits don't exist here */
else if (HAS_PCH_CPT(dev))
adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
else if (intel_crtc->pipe == 0)
adpa |= ADPA_PIPE_A_SELECT;
400,12 → 411,16
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret;
 
edid = intel_crt_get_edid(connector, adapter);
if (!edid)
return 0;
 
return intel_connector_update_modes(connector, edid);
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
 
return ret;
}
 
static bool intel_crt_detect_ddc(struct drm_connector *connector)
643,12 → 658,24
static void intel_crt_reset(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crt *crt = intel_attached_crt(connector);
 
if (HAS_PCH_SPLIT(dev))
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
 
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
I915_WRITE(PCH_ADPA, adpa);
POSTING_READ(PCH_ADPA);
 
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
}
 
}
 
/*
* Routines for controlling stuff on the analog port
*/
706,7 → 733,7
 
crt->base.type = INTEL_OUTPUT_ANALOG;
crt->base.cloneable = true;
if (IS_HASWELL(dev) || IS_I830(dev))
if (IS_I830(dev))
crt->base.crtc_mask = (1 << 0);
else
crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
726,6 → 753,9
 
crt->base.disable = intel_disable_crt;
crt->base.enable = intel_enable_crt;
if (IS_HASWELL(dev))
crt->base.get_hw_state = intel_ddi_get_hw_state;
else
crt->base.get_hw_state = intel_crt_get_hw_state;
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
743,18 → 773,14
* Configure the automatic hotplug detection stuff
*/
crt->force_hotplug_required = 0;
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
 
adpa = I915_READ(PCH_ADPA);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
I915_WRITE(PCH_ADPA, adpa);
POSTING_READ(PCH_ADPA);
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
 
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;
/*
* TODO: find a proper way to discover whether we need to set the
* polarity reversal bit or not, instead of relying on the BIOS.
*/
if (HAS_PCH_LPT(dev))
dev_priv->fdi_rx_polarity_reversed =
!!(I915_READ(_FDI_RXA_CTL) & FDI_RX_POLARITY_REVERSED_LPT);
}
 
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
/drivers/video/drm/i915/intel_ddi.c
58,6 → 58,26
0x00FFFFFF, 0x00040006 /* HDMI parameters */
};
 
static enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP ||
type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) {
struct intel_digital_port *intel_dig_port =
enc_to_dig_port(encoder);
return intel_dig_port->port;
 
} else if (type == INTEL_OUTPUT_ANALOG) {
return PORT_E;
 
} else {
DRM_ERROR("Invalid DDI encoder type %d\n", type);
BUG();
}
}
 
/* On Haswell, DDI port buffers must be programmed with correct values
* in advance. The buffer values are different for FDI and DP modes,
* but the HDMI/DVI fields are shared among those. So we program the DDI
118,7 → 138,20
DDI_BUF_EMP_800MV_3_5DB_HSW
};
 
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
enum port port)
{
uint32_t reg = DDI_BUF_CTL(port);
int i;
 
for (i = 0; i < 8; i++) {
udelay(1);
if (I915_READ(reg) & DDI_BUF_IS_IDLE)
return;
}
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
}
 
/* Starting with Haswell, different DDI ports can work in FDI mode for
* connection to the PCH-located connectors. For this, it is necessary to train
* both the DDI port and PCH receiver for the desired DDI buffer settings.
133,25 → 166,36
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
u32 reg, temp, i;
u32 temp, i, rx_ctl_val;
 
/* Configure CPU PLL, wait for warmup */
I915_WRITE(SPLL_CTL,
SPLL_PLL_ENABLE |
SPLL_PLL_FREQ_1350MHz |
SPLL_PLL_SCC);
/* Set the FDI_RX_MISC pwrdn lanes and the 2 workarounds listed at the
* mode set "sequence for CRT port" document:
* - TP1 to TP2 time with the default value
* - FDI delay to 90h
*/
I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) |
FDI_RX_PWRDN_LANE0_VAL(2) |
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
/* Use SPLL to drive the output when in FDI mode */
I915_WRITE(PORT_CLK_SEL(PORT_E),
PORT_CLK_SEL_SPLL);
I915_WRITE(PIPE_CLK_SEL(pipe),
PIPE_CLK_SEL_PORT(PORT_E));
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = FDI_RX_PLL_ENABLE | FDI_RX_ENHANCE_FRAME_ENABLE |
((intel_crtc->fdi_lanes - 1) << 19);
if (dev_priv->fdi_rx_polarity_reversed)
rx_ctl_val |= FDI_RX_POLARITY_REVERSED_LPT;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
udelay(220);
 
udelay(20);
/* Switch from Rawclk to PCDclk */
rx_ctl_val |= FDI_PCDCLK;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
 
/* Start the training iterating through available voltages and emphasis */
for (i=0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values); i++) {
/* Configure Port Clock Select */
I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->ddi_pll_sel);
 
/* Start the training iterating through available voltages and emphasis,
* testing each value twice. */
for (i = 0; i < ARRAY_SIZE(hsw_ddi_buf_ctl_values) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
I915_WRITE(DP_TP_CTL(PORT_E),
DP_TP_CTL_FDI_AUTOTRAIN |
160,41 → 204,37
DP_TP_CTL_ENABLE);
 
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage */
temp = I915_READ(DDI_BUF_CTL(PORT_E));
temp = (temp & ~DDI_BUF_EMP_MASK);
I915_WRITE(DDI_BUF_CTL(PORT_E),
temp |
DDI_BUF_CTL_ENABLE |
DDI_PORT_WIDTH_X2 |
hsw_ddi_buf_ctl_values[i]);
((intel_crtc->fdi_lanes - 1) << 1) |
hsw_ddi_buf_ctl_values[i / 2]);
POSTING_READ(DDI_BUF_CTL(PORT_E));
 
udelay(600);
 
/* We need to program FDI_RX_MISC with the default TP1 to TP2
* values before enabling the receiver, and configure the delay
* for the FDI timing generator to 90h. Luckily, all the other
* bits are supposed to be zeroed, so we can write those values
* directly.
*/
I915_WRITE(FDI_RX_MISC(pipe), FDI_RX_TP1_TO_TP2_48 |
FDI_RX_FDI_DELAY_90);
/* Program PCH FDI Receiver TU */
I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64));
 
/* Enable CPU FDI Receiver with auto-training */
reg = FDI_RX_CTL(pipe);
I915_WRITE(reg,
I915_READ(reg) |
FDI_LINK_TRAIN_AUTO |
FDI_RX_ENABLE |
FDI_LINK_TRAIN_PATTERN_1_CPT |
FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_PORT_WIDTH_2X_LPT |
FDI_RX_PLL_ENABLE);
POSTING_READ(reg);
udelay(100);
/* Enable PCH FDI Receiver with auto-training */
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
 
/* Wait for FDI receiver lane calibration */
udelay(30);
 
/* Unset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(_FDI_RXA_MISC);
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
I915_WRITE(_FDI_RXA_MISC, temp);
POSTING_READ(_FDI_RXA_MISC);
 
/* Wait for FDI auto training time */
udelay(5);
 
temp = I915_READ(DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
DRM_DEBUG_DRIVER("BUF_CTL training done on %d step\n", i);
DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
 
/* Enable normal pixel sending for FDI */
I915_WRITE(DP_TP_CTL(PORT_E),
203,60 → 243,36
DP_TP_CTL_ENHANCED_FRAME_ENABLE |
DP_TP_CTL_ENABLE);
 
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in FDI mode */
temp = I915_READ(DDI_FUNC_CTL(pipe));
temp &= ~PIPE_DDI_PORT_MASK;
temp |= PIPE_DDI_SELECT_PORT(PORT_E) |
PIPE_DDI_MODE_SELECT_FDI |
PIPE_DDI_FUNC_ENABLE |
PIPE_DDI_PORT_WIDTH_X2;
I915_WRITE(DDI_FUNC_CTL(pipe),
temp);
break;
} else {
DRM_ERROR("Error training BUF_CTL %d\n", i);
 
/* Disable DP_TP_CTL and FDI_RX_CTL) and retry */
I915_WRITE(DP_TP_CTL(PORT_E),
I915_READ(DP_TP_CTL(PORT_E)) &
~DP_TP_CTL_ENABLE);
I915_WRITE(FDI_RX_CTL(pipe),
I915_READ(FDI_RX_CTL(pipe)) &
~FDI_RX_PLL_ENABLE);
continue;
return;
}
}
 
DRM_DEBUG_KMS("FDI train done.\n");
}
temp = I915_READ(DDI_BUF_CTL(PORT_E));
temp &= ~DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
POSTING_READ(DDI_BUF_CTL(PORT_E));
 
/* For DDI connections, it is possible to support different outputs over the
* same DDI port, such as HDMI or DP or even VGA via FDI. So we don't know by
* the time the output is detected what exactly is on the other end of it. This
* function aims at providing support for this detection and proper output
* configuration.
*/
void intel_ddi_init(struct drm_device *dev, enum port port)
{
/* For now, we don't do any proper output detection and assume that we
* handle HDMI only */
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
temp = I915_READ(DP_TP_CTL(PORT_E));
temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(PORT_E), temp);
POSTING_READ(DP_TP_CTL(PORT_E));
 
switch(port){
case PORT_A:
/* We don't handle eDP and DP yet */
DRM_DEBUG_DRIVER("Found digital output on DDI port A\n");
break;
/* Assume that the ports B, C and D are working in HDMI mode for now */
case PORT_B:
case PORT_C:
case PORT_D:
intel_hdmi_init(dev, DDI_BUF_CTL(port), port);
break;
default:
DRM_DEBUG_DRIVER("No handlers defined for port %d, skipping DDI initialization\n",
port);
break;
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
 
rx_ctl_val &= ~FDI_RX_ENABLE;
I915_WRITE(_FDI_RXA_CTL, rx_ctl_val);
POSTING_READ(_FDI_RXA_CTL);
 
/* Reset FDI_RX_MISC pwrdn lanes */
temp = I915_READ(_FDI_RXA_MISC);
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
I915_WRITE(_FDI_RXA_MISC, temp);
POSTING_READ(_FDI_RXA_MISC);
}
 
DRM_ERROR("FDI link training failed!\n");
}
 
/* WRPLL clock dividers */
645,175 → 661,854
{298000, 2, 21, 19},
};
 
void intel_ddi_mode_set(struct drm_encoder *encoder,
static void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int port = intel_hdmi->ddi_port;
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
int port = intel_ddi_get_encoder_port(intel_encoder);
int pipe = intel_crtc->pipe;
int p, n2, r2;
u32 temp, i;
int type = intel_encoder->type;
 
/* On Haswell, we need to enable the clocks and prepare DDI function to
* work in HDMI mode for this pipe.
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_dp->DP = DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
switch (intel_dp->lane_count) {
case 1:
intel_dp->DP |= DDI_PORT_WIDTH_X1;
break;
case 2:
intel_dp->DP |= DDI_PORT_WIDTH_X2;
break;
case 4:
intel_dp->DP |= DDI_PORT_WIDTH_X4;
break;
default:
intel_dp->DP |= DDI_PORT_WIDTH_X4;
WARN(1, "Unexpected DP lane count %d\n",
intel_dp->lane_count);
break;
}
 
if (intel_dp->has_audio) {
DRM_DEBUG_DRIVER("DP audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("DP audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
}
 
intel_dp_init_link_config(intel_dp);
 
} else if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs a new logic
* and a new set of registers, so we leave it for future
* patch bombing.
*/
DRM_DEBUG_KMS("Preparing HDMI DDI mode for Haswell on port %c, pipe %c\n", port_name(port), pipe_name(pipe));
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
 
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
}
 
intel_hdmi->set_infoframes(encoder, adjusted_mode);
}
}
 
static struct intel_encoder *
intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder, *ret = NULL;
int num_encoders = 0;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
ret = intel_encoder;
num_encoders++;
}
 
if (num_encoders != 1)
WARN(1, "%d encoders on crtc for pipe %d\n", num_encoders,
intel_crtc->pipe);
 
BUG_ON(ret == NULL);
return ret;
}
 
void intel_ddi_put_crtc_pll(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t val;
 
switch (intel_crtc->ddi_pll_sel) {
case PORT_CLK_SEL_SPLL:
plls->spll_refcount--;
if (plls->spll_refcount == 0) {
DRM_DEBUG_KMS("Disabling SPLL\n");
val = I915_READ(SPLL_CTL);
WARN_ON(!(val & SPLL_PLL_ENABLE));
I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
POSTING_READ(SPLL_CTL);
}
break;
case PORT_CLK_SEL_WRPLL1:
plls->wrpll1_refcount--;
if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Disabling WRPLL 1\n");
val = I915_READ(WRPLL_CTL1);
WARN_ON(!(val & WRPLL_PLL_ENABLE));
I915_WRITE(WRPLL_CTL1, val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL1);
}
break;
case PORT_CLK_SEL_WRPLL2:
plls->wrpll2_refcount--;
if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Disabling WRPLL 2\n");
val = I915_READ(WRPLL_CTL2);
WARN_ON(!(val & WRPLL_PLL_ENABLE));
I915_WRITE(WRPLL_CTL2, val & ~WRPLL_PLL_ENABLE);
POSTING_READ(WRPLL_CTL2);
}
break;
}
 
WARN(plls->spll_refcount < 0, "Invalid SPLL refcount\n");
WARN(plls->wrpll1_refcount < 0, "Invalid WRPLL1 refcount\n");
WARN(plls->wrpll2_refcount < 0, "Invalid WRPLL2 refcount\n");
 
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}
 
static void intel_ddi_calculate_wrpll(int clock, int *p, int *n2, int *r2)
{
u32 i;
 
for (i = 0; i < ARRAY_SIZE(wrpll_tmds_clock_table); i++)
if (crtc->mode.clock <= wrpll_tmds_clock_table[i].clock)
if (clock <= wrpll_tmds_clock_table[i].clock)
break;
 
if (i == ARRAY_SIZE(wrpll_tmds_clock_table))
i--;
 
p = wrpll_tmds_clock_table[i].p;
n2 = wrpll_tmds_clock_table[i].n2;
r2 = wrpll_tmds_clock_table[i].r2;
*p = wrpll_tmds_clock_table[i].p;
*n2 = wrpll_tmds_clock_table[i].n2;
*r2 = wrpll_tmds_clock_table[i].r2;
 
if (wrpll_tmds_clock_table[i].clock != crtc->mode.clock)
if (wrpll_tmds_clock_table[i].clock != clock)
DRM_INFO("WR PLL: using settings for %dKHz on %dKHz mode\n",
wrpll_tmds_clock_table[i].clock, crtc->mode.clock);
wrpll_tmds_clock_table[i].clock, clock);
 
DRM_DEBUG_KMS("WR PLL: %dKHz refresh rate with p=%d, n2=%d r2=%d\n",
crtc->mode.clock, p, n2, r2);
clock, *p, *n2, *r2);
}
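 
/* Example: a 148500 kHz HDMI mode selects the first table entry whose
 * .clock is >= 148500; if the mode exceeds every entry, the last
 * (largest) entry is used and the substitution is logged via DRM_INFO. */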
 
/* Enable LCPLL if disabled */
temp = I915_READ(LCPLL_CTL);
if (temp & LCPLL_PLL_DISABLE)
I915_WRITE(LCPLL_CTL,
temp & ~LCPLL_PLL_DISABLE);
bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
int type = intel_encoder->type;
enum pipe pipe = intel_crtc->pipe;
uint32_t reg, val;
 
/* Configure WR PLL 1, program the correct divider values for
* the desired frequency and wait for warmup */
I915_WRITE(WRPLL_CTL1,
WRPLL_PLL_ENABLE |
WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) |
WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p));
/* TODO: reuse PLLs when possible (compare values) */
 
udelay(20);
intel_ddi_put_crtc_pll(crtc);
 
/* Use WRPLL1 clock to drive the output to the port, and tell the pipe to use
* this port for connection.
*/
I915_WRITE(PORT_CLK_SEL(port),
PORT_CLK_SEL_WRPLL1);
I915_WRITE(PIPE_CLK_SEL(pipe),
PIPE_CLK_SEL_PORT(port));
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
switch (intel_dp->link_bw) {
case DP_LINK_BW_1_62:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
break;
case DP_LINK_BW_2_7:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
break;
case DP_LINK_BW_5_4:
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
break;
default:
DRM_ERROR("Link bandwidth %d unsupported\n",
intel_dp->link_bw);
return false;
}
 
/* We don't need to turn any PLL on because we'll use LCPLL. */
return true;
 
} else if (type == INTEL_OUTPUT_HDMI) {
int p, n2, r2;
 
if (plls->wrpll1_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 1 on pipe %c\n",
pipe_name(pipe));
plls->wrpll1_refcount++;
reg = WRPLL_CTL1;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL1;
} else if (plls->wrpll2_refcount == 0) {
DRM_DEBUG_KMS("Using WRPLL 2 on pipe %c\n",
pipe_name(pipe));
plls->wrpll2_refcount++;
reg = WRPLL_CTL2;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_WRPLL2;
} else {
DRM_ERROR("No WRPLLs available!\n");
return false;
}
 
WARN(I915_READ(reg) & WRPLL_PLL_ENABLE,
"WRPLL already enabled\n");
 
intel_ddi_calculate_wrpll(clock, &p, &n2, &r2);
 
val = WRPLL_PLL_ENABLE | WRPLL_PLL_SELECT_LCPLL_2700 |
WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
WRPLL_DIVIDER_POST(p);
 
} else if (type == INTEL_OUTPUT_ANALOG) {
if (plls->spll_refcount == 0) {
DRM_DEBUG_KMS("Using SPLL on pipe %c\n",
pipe_name(pipe));
plls->spll_refcount++;
reg = SPLL_CTL;
intel_crtc->ddi_pll_sel = PORT_CLK_SEL_SPLL;
}
 
WARN(I915_READ(reg) & SPLL_PLL_ENABLE,
"SPLL already enabled\n");
 
val = SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
 
} else {
WARN(1, "Invalid DDI encoder type %d\n", type);
return false;
}
 
I915_WRITE(reg, val);
udelay(20);
 
if (intel_hdmi->has_audio) {
/* Proper support for digital audio needs a new logic and a new set
* of registers, so we leave it for future patch bombing.
*/
DRM_DEBUG_DRIVER("HDMI audio on pipe %c on DDI\n",
pipe_name(intel_crtc->pipe));
return true;
}
 
/* write eld */
DRM_DEBUG_DRIVER("HDMI audio: write eld information\n");
intel_write_eld(encoder, adjusted_mode);
void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
int type = intel_encoder->type;
uint32_t temp;
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 
temp = TRANS_MSA_SYNC_CLK;
switch (intel_crtc->bpp) {
case 18:
temp |= TRANS_MSA_6_BPC;
break;
case 24:
temp |= TRANS_MSA_8_BPC;
break;
case 30:
temp |= TRANS_MSA_10_BPC;
break;
case 36:
temp |= TRANS_MSA_12_BPC;
break;
default:
temp |= TRANS_MSA_8_BPC;
WARN(1, "%d bpp unsupported by DDI function\n",
intel_crtc->bpp);
}
I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
}
}
 
/* Enable PIPE_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = PIPE_DDI_FUNC_ENABLE | PIPE_DDI_SELECT_PORT(port);
void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t temp;
 
/* Enable TRANS_DDI_FUNC_CTL for the pipe to work in HDMI mode */
temp = TRANS_DDI_FUNC_ENABLE;
temp |= TRANS_DDI_SELECT_PORT(port);
 
switch (intel_crtc->bpp) {
case 18:
temp |= PIPE_DDI_BPC_6;
temp |= TRANS_DDI_BPC_6;
break;
case 24:
temp |= PIPE_DDI_BPC_8;
temp |= TRANS_DDI_BPC_8;
break;
case 30:
temp |= PIPE_DDI_BPC_10;
temp |= TRANS_DDI_BPC_10;
break;
case 36:
temp |= PIPE_DDI_BPC_12;
temp |= TRANS_DDI_BPC_12;
break;
default:
WARN(1, "%d bpp unsupported by pipe DDI function\n",
WARN(1, "%d bpp unsupported by transcoder DDI function\n",
intel_crtc->bpp);
}
 
if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
temp |= TRANS_DDI_PVSYNC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DDI_PHSYNC;
 
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
break;
case PIPE_B:
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
break;
case PIPE_C:
temp |= TRANS_DDI_EDP_INPUT_C_ONOFF;
break;
default:
BUG();
break;
}
}
 
if (type == INTEL_OUTPUT_HDMI) {
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
 
if (intel_hdmi->has_hdmi_sink)
temp |= PIPE_DDI_MODE_SELECT_HDMI;
temp |= TRANS_DDI_MODE_SELECT_HDMI;
else
temp |= PIPE_DDI_MODE_SELECT_DVI;
temp |= TRANS_DDI_MODE_SELECT_DVI;
 
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
temp |= PIPE_DDI_PVSYNC;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
temp |= PIPE_DDI_PHSYNC;
} else if (type == INTEL_OUTPUT_ANALOG) {
temp |= TRANS_DDI_MODE_SELECT_FDI;
temp |= (intel_crtc->fdi_lanes - 1) << 1;
 
I915_WRITE(DDI_FUNC_CTL(pipe), temp);
} else if (type == INTEL_OUTPUT_DISPLAYPORT ||
type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_hdmi->set_infoframes(encoder, adjusted_mode);
temp |= TRANS_DDI_MODE_SELECT_DP_SST;
 
switch (intel_dp->lane_count) {
case 1:
temp |= TRANS_DDI_PORT_WIDTH_X1;
break;
case 2:
temp |= TRANS_DDI_PORT_WIDTH_X2;
break;
case 4:
temp |= TRANS_DDI_PORT_WIDTH_X4;
break;
default:
temp |= TRANS_DDI_PORT_WIDTH_X4;
WARN(1, "Unsupported lane count %d\n",
intel_dp->lane_count);
}
 
} else {
WARN(1, "Invalid encoder type %d for pipe %d\n",
intel_encoder->type, pipe);
}
 
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
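
To make the bit-packing concrete, here is an illustrative composition of the flags handled above for one hypothetical configuration (2-lane DP SST on port B, 24 bpp, positive-going syncs; the eDP input-select bits are omitted). This reuses only the flags from the function above and is not a value taken from hardware:

	/* Illustrative only: final TRANS_DDI_FUNC_CTL value for 2-lane
	 * DP SST on port B at 8 bpc with +hsync/+vsync. */
	temp = TRANS_DDI_FUNC_ENABLE |
	       TRANS_DDI_SELECT_PORT(PORT_B) |
	       TRANS_DDI_BPC_8 |
	       TRANS_DDI_PVSYNC | TRANS_DDI_PHSYNC |
	       TRANS_DDI_MODE_SELECT_DP_SST |
	       TRANS_DDI_PORT_WIDTH_X2;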
 
void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
uint32_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
 
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
}
 
bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder = intel_connector->encoder;
int type = intel_connector->base.connector_type;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum pipe pipe = 0;
enum transcoder cpu_transcoder;
uint32_t tmp;
 
if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
return false;
 
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
else
cpu_transcoder = pipe;
 
tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
 
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
return (type == DRM_MODE_CONNECTOR_HDMIA);
 
case TRANS_DDI_MODE_SELECT_DP_SST:
if (type == DRM_MODE_CONNECTOR_eDP)
return true;
case TRANS_DDI_MODE_SELECT_DP_MST:
return (type == DRM_MODE_CONNECTOR_DisplayPort);
 
case TRANS_DDI_MODE_SELECT_FDI:
return (type == DRM_MODE_CONNECTOR_VGA);
 
default:
return false;
}
}
 
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe)
{
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum port port = intel_ddi_get_encoder_port(encoder);
u32 tmp;
int i;
 
tmp = I915_READ(DDI_BUF_CTL(intel_hdmi->ddi_port));
tmp = I915_READ(DDI_BUF_CTL(port));
 
if (!(tmp & DDI_BUF_CTL_ENABLE))
return false;
 
for_each_pipe(i) {
tmp = I915_READ(DDI_FUNC_CTL(i));
if (port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
if ((tmp & PIPE_DDI_PORT_MASK)
== PIPE_DDI_SELECT_PORT(intel_hdmi->ddi_port)) {
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
*pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
*pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
*pipe = PIPE_C;
break;
}
 
return true;
} else {
for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
 
if ((tmp & TRANS_DDI_PORT_MASK)
== TRANS_DDI_SELECT_PORT(port)) {
*pipe = i;
return true;
}
}
}
 
DRM_DEBUG_KMS("No pipe for ddi port %i found\n", intel_hdmi->ddi_port);
DRM_DEBUG_KMS("No pipe for ddi port %i found\n", port);
 
return true;
}
 
void intel_enable_ddi(struct intel_encoder *encoder)
static uint32_t intel_ddi_get_crtc_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_device *dev = encoder->base.dev;
uint32_t temp, ret;
enum port port;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int i;
 
if (cpu_transcoder == TRANSCODER_EDP) {
port = PORT_A;
} else {
temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
temp &= TRANS_DDI_PORT_MASK;
 
for (i = PORT_B; i <= PORT_E; i++)
if (temp == TRANS_DDI_SELECT_PORT(i))
port = i;
}
 
ret = I915_READ(PORT_CLK_SEL(port));
 
DRM_DEBUG_KMS("Pipe %c connected to port %c using clock 0x%08x\n",
pipe_name(pipe), port_name(port), ret);
 
return ret;
}
 
void intel_ddi_setup_hw_pll_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
int port = intel_hdmi->ddi_port;
u32 temp;
enum pipe pipe;
struct intel_crtc *intel_crtc;
 
temp = I915_READ(DDI_BUF_CTL(port));
temp |= DDI_BUF_CTL_ENABLE;
for_each_pipe(pipe) {
intel_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
/* Enable DDI_BUF_CTL. In HDMI/DVI mode, the port width and
* swing/emphasis values are ignored so nothing special needs
* to be done besides enabling the port.
*/
if (!intel_crtc->active)
continue;
 
intel_crtc->ddi_pll_sel = intel_ddi_get_crtc_pll(dev_priv,
pipe);
 
switch (intel_crtc->ddi_pll_sel) {
case PORT_CLK_SEL_SPLL:
dev_priv->ddi_plls.spll_refcount++;
break;
case PORT_CLK_SEL_WRPLL1:
dev_priv->ddi_plls.wrpll1_refcount++;
break;
case PORT_CLK_SEL_WRPLL2:
dev_priv->ddi_plls.wrpll2_refcount++;
break;
}
}
}
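
The refcounts taken above are dropped again when a CRTC releases its clock. A minimal sketch of that release path, using only the ddi_plls fields and ddi_pll_sel values seen in this file (the driver's actual intel_ddi_put_crtc_pll may also disable a PLL whose refcount reaches zero):

static void ddi_pll_release_sketch(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Drop the reference taken when this CRTC selected its PLL. */
	switch (intel_crtc->ddi_pll_sel) {
	case PORT_CLK_SEL_SPLL:
		dev_priv->ddi_plls.spll_refcount--;
		break;
	case PORT_CLK_SEL_WRPLL1:
		dev_priv->ddi_plls.wrpll1_refcount--;
		break;
	case PORT_CLK_SEL_WRPLL2:
		dev_priv->ddi_plls.wrpll2_refcount--;
		break;
	}

	/* Mark the CRTC clockless so the next modeset must pick again. */
	intel_crtc->ddi_pll_sel = PORT_CLK_SEL_NONE;
}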
 
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_PORT(port));
}
 
void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 
if (cpu_transcoder != TRANSCODER_EDP)
I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
TRANS_CLK_SEL_DISABLED);
}
 
static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_on(intel_dp);
ironlake_edp_panel_vdd_off(intel_dp, true);
}
 
WARN_ON(intel_crtc->ddi_pll_sel == PORT_CLK_SEL_NONE);
I915_WRITE(PORT_CLK_SEL(port), intel_crtc->ddi_pll_sel);
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
}
 
static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t val;
bool wait = false;
 
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), val);
wait = true;
}
 
val = I915_READ(DP_TP_CTL(port));
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
 
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_off(intel_dp);
}
 
I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
}
 
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
 
if (type == INTEL_OUTPUT_HDMI) {
/* In HDMI/DVI mode, the port width and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
*/
I915_WRITE(DDI_BUF_CTL(port), temp);
I915_WRITE(DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE);
} else if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
ironlake_edp_backlight_on(intel_dp);
}
}
 
void intel_disable_ddi(struct intel_encoder *encoder)
static void intel_disable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_device *dev = encoder->base.dev;
struct drm_encoder *encoder = &intel_encoder->base;
int type = intel_encoder->type;
 
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
ironlake_edp_backlight_off(intel_dp);
}
}
 
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
{
if (I915_READ(HSW_FUSE_STRAP) & HSW_CDCLK_LIMIT)
return 450;
else if ((I915_READ(LCPLL_CTL) & LCPLL_CLK_FREQ_MASK) ==
LCPLL_CLK_FREQ_450)
return 450;
else if (IS_ULT(dev_priv->dev))
return 338;
else
return 540;
}
 
void intel_ddi_pll_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
int port = intel_hdmi->ddi_port;
u32 temp;
uint32_t val = I915_READ(LCPLL_CTL);
 
temp = I915_READ(DDI_BUF_CTL(port));
temp &= ~DDI_BUF_CTL_ENABLE;
/* The LCPLL register should be turned on by the BIOS. For now let's
* just check its state and print errors in case something is wrong.
* Don't even try to turn it on.
*/
 
I915_WRITE(DDI_BUF_CTL(port), temp);
DRM_DEBUG_KMS("CDCLK running at %dMHz\n",
intel_ddi_get_cdclk_freq(dev_priv));
 
if (val & LCPLL_CD_SOURCE_FCLK)
DRM_ERROR("CDCLK source is not LCPLL\n");
 
if (val & LCPLL_PLL_DISABLE)
DRM_ERROR("LCPLL is disabled\n");
}
 
void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
enum port port = intel_dig_port->port;
bool wait;
uint32_t val;
 
if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) {
val = I915_READ(DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), val);
wait = true;
}
 
val = I915_READ(DP_TP_CTL(port));
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
 
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
}
 
val = DP_TP_CTL_ENABLE | DP_TP_CTL_MODE_SST |
DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_SCRAMBLE_DISABLE;
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
I915_WRITE(DP_TP_CTL(port), val);
POSTING_READ(DP_TP_CTL(port));
 
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
POSTING_READ(DDI_BUF_CTL(port));
 
udelay(600);
}
 
void intel_ddi_fdi_disable(struct drm_crtc *crtc)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
uint32_t val;
 
intel_ddi_post_disable(intel_encoder);
 
val = I915_READ(_FDI_RXA_CTL);
val &= ~FDI_RX_ENABLE;
I915_WRITE(_FDI_RXA_CTL, val);
 
val = I915_READ(_FDI_RXA_MISC);
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
I915_WRITE(_FDI_RXA_MISC, val);
 
val = I915_READ(_FDI_RXA_CTL);
val &= ~FDI_PCDCLK;
I915_WRITE(_FDI_RXA_CTL, val);
 
val = I915_READ(_FDI_RXA_CTL);
val &= ~FDI_RX_PLL_ENABLE;
I915_WRITE(_FDI_RXA_CTL, val);
}
 
static void intel_ddi_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int type = intel_encoder->type;
 
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP)
intel_dp_check_link_status(intel_dp);
}
 
static void intel_ddi_destroy(struct drm_encoder *encoder)
{
/* HDMI has nothing special to destroy, so we can go with this. */
intel_dp_encoder_destroy(encoder);
}
 
static bool intel_ddi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
int type = intel_encoder->type;
 
WARN(type == INTEL_OUTPUT_UNKNOWN, "mode_fixup() on unknown output!\n");
 
if (type == INTEL_OUTPUT_HDMI)
return intel_hdmi_mode_fixup(encoder, mode, adjusted_mode);
else
return intel_dp_mode_fixup(encoder, mode, adjusted_mode);
}
 
static const struct drm_encoder_funcs intel_ddi_funcs = {
.destroy = intel_ddi_destroy,
};
 
static const struct drm_encoder_helper_funcs intel_ddi_helper_funcs = {
.mode_fixup = intel_ddi_mode_fixup,
.mode_set = intel_ddi_mode_set,
.disable = intel_encoder_noop,
};
 
void intel_ddi_init(struct drm_device *dev, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *hdmi_connector = NULL;
struct intel_connector *dp_connector = NULL;
 
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
if (!intel_dig_port)
return;
 
dp_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!dp_connector) {
kfree(intel_dig_port);
return;
}
 
if (port != PORT_A) {
hdmi_connector = kzalloc(sizeof(struct intel_connector),
GFP_KERNEL);
if (!hdmi_connector) {
kfree(dp_connector);
kfree(intel_dig_port);
return;
}
}
 
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
 
drm_encoder_init(dev, encoder, &intel_ddi_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &intel_ddi_helper_funcs);
 
intel_encoder->enable = intel_enable_ddi;
intel_encoder->pre_enable = intel_ddi_pre_enable;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->post_disable = intel_ddi_post_disable;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
 
intel_dig_port->port = port;
if (hdmi_connector)
intel_dig_port->hdmi.sdvox_reg = DDI_BUF_CTL(port);
else
intel_dig_port->hdmi.sdvox_reg = 0;
intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);
 
intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
intel_encoder->hot_plug = intel_ddi_hot_plug;
 
if (hdmi_connector)
intel_hdmi_init_connector(intel_dig_port, hdmi_connector);
intel_dp_init_connector(intel_dig_port, dp_connector);
}
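
For context, output setup probes which DDI ports are populated and calls intel_ddi_init() once per port. A hedged illustration of such a caller fragment (assuming struct drm_device *dev is in scope, and the usual i915 fuse-strap names SFUSE_STRAP and SFUSE_STRAP_DDI*_DETECTED); the real probe sequence lives in intel_setup_outputs() and may differ:

	/* Illustrative only: register each DDI port the fuse straps report
	 * as present. Port A (eDP) is typically probed separately via
	 * DDI_BUF_CTL(PORT_A). */
	u32 found = I915_READ(SFUSE_STRAP);

	if (found & SFUSE_STRAP_DDIB_DETECTED)
		intel_ddi_init(dev, PORT_B);
	if (found & SFUSE_STRAP_DDIC_DETECTED)
		intel_ddi_init(dev, PORT_C);
	if (found & SFUSE_STRAP_DDID_DETECTED)
		intel_ddi_init(dev, PORT_D);
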
/drivers/video/drm/i915/intel_display.c
51,24 → 51,11
 
#define MAX_ERRNO 4095
 
#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
 
static inline long IS_ERR(const void *ptr)
{
return IS_ERR_VALUE((unsigned long)ptr);
}
 
static inline void *ERR_PTR(long error)
{
return (void *) error;
}
 
 
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
 
bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
static void intel_increase_pllclock(struct drm_crtc *crtc);
//static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
 
typedef struct {
/* given values */
103,6 → 90,16
/* FDI */
#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
 
int
intel_pch_rawclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
WARN_ON(!HAS_PCH_SPLIT(dev));
 
return I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK;
}
 
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *match_clock,
403,7 → 400,7
 
static const intel_limit_t intel_limits_vlv_hdmi = {
.dot = { .min = 20000, .max = 165000 },
.vco = { .min = 5994000, .max = 4000000 },
.vco = { .min = 4000000, .max = 5994000},
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m1 = { .min = 2, .max = 3 },
416,10 → 413,10
};
 
static const intel_limit_t intel_limits_vlv_dp = {
.dot = { .min = 162000, .max = 270000 },
.vco = { .min = 5994000, .max = 4000000 },
.dot = { .min = 25000, .max = 270000 },
.vco = { .min = 4000000, .max = 6000000 },
.n = { .min = 1, .max = 7 },
.m = { .min = 60, .max = 300 }, /* guess */
.m = { .min = 22, .max = 450 },
.m1 = { .min = 2, .max = 3 },
.m2 = { .min = 11, .max = 156 },
.p = { .min = 10, .max = 30 },
554,7 → 551,7
limit = &intel_limits_ironlake_single_lvds;
}
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
HAS_eDP)
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
limit = &intel_limits_ironlake_display_port;
else
limit = &intel_limits_ironlake_dac;
950,6 → 947,15
return true;
}
 
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
return intel_crtc->cpu_transcoder;
}
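
On Haswell a pipe driving eDP runs on TRANSCODER_EDP rather than a transcoder of its own, so per-pipe register lookups have to go through this mapping. A minimal usage sketch (assuming dev_priv and pipe in scope), mirroring how the helper is used further down in this file:

	/* Read PIPECONF through the pipe-to-transcoder mapping so the
	 * eDP case is handled transparently. */
	enum transcoder cpu_transcoder =
		intel_pipe_to_cpu_transcoder(dev_priv, pipe);
	u32 val = I915_READ(PIPECONF(cpu_transcoder));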
 
static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
1022,9 → 1028,11
void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
 
if (INTEL_INFO(dev)->gen >= 4) {
int reg = PIPECONF(pipe);
int reg = PIPECONF(cpu_transcoder);
 
/* Wait for the Pipe State to go off */
if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1126,12 → 1134,14
int reg;
u32 val;
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
 
if (IS_HASWELL(dev_priv->dev)) {
/* On Haswell, DDI is used instead of FDI_TX_CTL */
reg = DDI_FUNC_CTL(pipe);
reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
reg = FDI_TX_CTL(pipe);
val = I915_READ(reg);
1151,14 → 1161,9
u32 val;
bool cur_state;
 
if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
return;
} else {
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
cur_state = !!(val & FDI_RX_ENABLE);
}
WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
state_string(state), state_string(cur_state));
1191,10 → 1196,6
int reg;
u32 val;
 
if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
return;
}
reg = FDI_RX_CTL(pipe);
val = I915_READ(reg);
WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
1235,12 → 1236,14
int reg;
u32 val;
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
 
/* if we need the pipe A quirk it must be always on */
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
 
reg = PIPECONF(pipe);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
WARN(cur_state != state,
1515,25 → 1518,27
 
/* SBI access */
static void
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
enum intel_sbi_destination destination)
{
unsigned long flags;
u32 tmp;
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
 
I915_WRITE(SBI_ADDR,
(reg << 16));
I915_WRITE(SBI_DATA,
value);
I915_WRITE(SBI_CTL_STAT,
SBI_BUSY |
SBI_CTL_OP_CRWR);
I915_WRITE(SBI_ADDR, (reg << 16));
I915_WRITE(SBI_DATA, value);
 
if (destination == SBI_ICLK)
tmp = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRWR;
else
tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1545,24 → 1550,26
}
 
static u32
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
enum intel_sbi_destination destination)
{
unsigned long flags;
u32 value = 0;
 
spin_lock_irqsave(&dev_priv->dpio_lock, flags);
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
100)) {
if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 100)) {
DRM_ERROR("timeout waiting for SBI to become ready\n");
goto out_unlock;
}
 
I915_WRITE(SBI_ADDR,
(reg << 16));
I915_WRITE(SBI_CTL_STAT,
SBI_BUSY |
SBI_CTL_OP_CRRD);
I915_WRITE(SBI_ADDR, (reg << 16));
 
if (destination == SBI_ICLK)
value = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
else
value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
 
if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
100)) {
DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1577,7 → 1584,7
}
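
Callers of these two helpers almost always perform a read-modify-write against a single destination. A small hypothetical wrapper (not part of the driver), built purely on the helpers above, shows the pattern:

static void intel_sbi_rmw_sketch(struct drm_i915_private *dev_priv, u16 reg,
				 u32 clear, u32 set,
				 enum intel_sbi_destination destination)
{
	/* Read the current value over the sideband interface... */
	u32 tmp = intel_sbi_read(dev_priv, reg, destination);

	/* ...clear then set the requested bits... */
	tmp &= ~clear;
	tmp |= set;

	/* ...and write the result back to the same destination. */
	intel_sbi_write(dev_priv, reg, tmp, destination);
}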
 
/**
* intel_enable_pch_pll - enable PCH PLL
* ironlake_enable_pch_pll - enable PCH PLL
* @dev_priv: i915 private structure
* @pipe: pipe PLL to enable
*
1584,7 → 1591,7
* The PCH PLL needs to be enabled before the PCH transcoder, since it
* drives the transcoder clock.
*/
static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
static void ironlake_enable_pch_pll(struct intel_crtc *intel_crtc)
{
struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
struct intel_pch_pll *pll;
1668,12 → 1675,12
pll->on = false;
}
 
static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val, pipeconf_val;
struct drm_device *dev = dev_priv->dev;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
uint32_t reg, val, pipeconf_val;
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
1687,10 → 1694,15
assert_fdi_tx_enabled(dev_priv, pipe);
assert_fdi_rx_enabled(dev_priv, pipe);
 
if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
return;
if (HAS_PCH_CPT(dev)) {
/* Workaround: Set the timing override bit before enabling the
* pch transcoder. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
I915_WRITE(reg, val);
}
 
reg = TRANSCONF(pipe);
val = I915_READ(reg);
pipeconf_val = I915_READ(PIPECONF(pipe));
1719,11 → 1731,42
DRM_ERROR("failed to enable transcoder %d\n", pipe);
}
 
static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
u32 val, pipeconf_val;
 
/* PCH only available on ILK+ */
BUG_ON(dev_priv->info->gen < 5);
 
/* FDI must be feeding us bits for PCH ports */
assert_fdi_tx_enabled(dev_priv, cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
 
/* Workaround: set timing override bit. */
val = I915_READ(_TRANSA_CHICKEN2);
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
I915_WRITE(_TRANSA_CHICKEN2, val);
 
val = TRANS_ENABLE;
pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
 
if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
PIPECONF_INTERLACED_ILK)
val |= TRANS_INTERLACED;
else
val |= TRANS_PROGRESSIVE;
 
I915_WRITE(TRANSCONF(TRANSCODER_A), val);
if (wait_for(I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("Failed to enable PCH transcoder\n");
}
 
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
u32 val;
struct drm_device *dev = dev_priv->dev;
uint32_t reg, val;
 
/* FDI relies on the transcoder */
assert_fdi_tx_disabled(dev_priv, pipe);
1739,8 → 1782,33
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("failed to disable transcoder %d\n", pipe);
 
if (!HAS_PCH_IBX(dev)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
val = I915_READ(reg);
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
I915_WRITE(reg, val);
}
}
 
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
u32 val;
 
val = I915_READ(_TRANSACONF);
val &= ~TRANS_ENABLE;
I915_WRITE(_TRANSACONF, val);
/* wait for PCH transcoder off, transcoder state */
if (wait_for((I915_READ(_TRANSACONF) & TRANS_STATE_ENABLE) == 0, 50))
DRM_ERROR("Failed to disable PCH transcoder\n");
 
/* Workaround: clear timing override bit. */
val = I915_READ(_TRANSA_CHICKEN2);
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
I915_WRITE(_TRANSA_CHICKEN2, val);
}
 
/**
* intel_enable_pipe - enable a pipe, asserting requirements
* @dev_priv: i915 private structure
1758,9 → 1826,17
static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
bool pch_port)
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
enum transcoder pch_transcoder;
int reg;
u32 val;
 
if (IS_HASWELL(dev_priv->dev))
pch_transcoder = TRANSCODER_A;
else
pch_transcoder = pipe;
 
/*
* A pipe without a PLL won't actually be able to drive bits from
* a plane. On ILK+ the pipe PLLs are integrated, so we don't
1771,13 → 1847,13
else {
if (pch_port) {
/* if driving the PCH, we need FDI enabled */
assert_fdi_rx_pll_enabled(dev_priv, pipe);
assert_fdi_tx_pll_enabled(dev_priv, pipe);
assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
assert_fdi_tx_pll_enabled(dev_priv, cpu_transcoder);
}
/* FIXME: assert CPU port conditions for SNB+ */
}
 
reg = PIPECONF(pipe);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if (val & PIPECONF_ENABLE)
return;
1801,6 → 1877,8
static void intel_disable_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
int reg;
u32 val;
 
1814,7 → 1892,7
if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
return;
 
reg = PIPECONF(pipe);
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
1830,8 → 1908,10
void intel_flush_display_plane(struct drm_i915_private *dev_priv,
enum plane plane)
{
if (dev_priv->info->gen >= 4)
I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
else
I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
}
 
/**
1925,11 → 2005,6
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
// if (obj->tiling_mode != I915_TILING_NONE) {
// ret = i915_gem_object_get_fence(obj, pipelined);
// if (ret)
// goto err_unpin;
// }
 
dev_priv->mm.interruptible = true;
return 0;
1949,7 → 2024,7
 
/* Computes the linear offset to the base tile and adjusts x, y. Bytes per pixel
* is assumed to be a power-of-two. */
static unsigned long gen4_compute_dspaddr_offset_xtiled(int *x, int *y,
unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
unsigned int bpp,
unsigned int pitch)
{
1992,24 → 2067,38
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
case 8:
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
if (fb->depth == 15)
dspcntr |= DISPPLANE_15_16BPP;
else
dspcntr |= DISPPLANE_16BPP;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
dspcntr |= DISPPLANE_BGRX555;
break;
case 24:
case 32:
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
case DRM_FORMAT_RGB565:
dspcntr |= DISPPLANE_BGRX565;
break;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
dspcntr |= DISPPLANE_BGRX888;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
 
if (INTEL_INFO(dev)->gen >= 4) {
if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
2023,7 → 2112,7
 
if (INTEL_INFO(dev)->gen >= 4) {
intel_crtc->dspaddr_offset =
gen4_compute_dspaddr_offset_xtiled(&x, &y,
intel_gen4_compute_offset_xtiled(&x, &y,
fb->bits_per_pixel / 8,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
2076,27 → 2165,31
dspcntr = I915_READ(reg);
/* Mask out pixel format bits in case we change it */
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
switch (fb->bits_per_pixel) {
case 8:
switch (fb->pixel_format) {
case DRM_FORMAT_C8:
dspcntr |= DISPPLANE_8BPP;
break;
case 16:
if (fb->depth != 16)
return -EINVAL;
 
dspcntr |= DISPPLANE_16BPP;
case DRM_FORMAT_RGB565:
dspcntr |= DISPPLANE_BGRX565;
break;
case 24:
case 32:
if (fb->depth == 24)
dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
else if (fb->depth == 30)
dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
else
return -EINVAL;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
dspcntr |= DISPPLANE_BGRX888;
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
dspcntr |= DISPPLANE_RGBX888;
break;
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
dspcntr |= DISPPLANE_BGRX101010;
break;
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
dspcntr |= DISPPLANE_RGBX101010;
break;
default:
DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
DRM_ERROR("Unknown pixel format 0x%08x\n", fb->pixel_format);
return -EINVAL;
}
 
2112,7 → 2205,7
 
linear_offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
intel_crtc->dspaddr_offset =
gen4_compute_dspaddr_offset_xtiled(&x, &y,
intel_gen4_compute_offset_xtiled(&x, &y,
fb->bits_per_pixel / 8,
fb->pitches[0]);
linear_offset -= intel_crtc->dspaddr_offset;
2122,8 → 2215,12
I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_MODIFY_DISPBASE(DSPSURF(plane),
obj->gtt_offset + intel_crtc->dspaddr_offset);
if (IS_HASWELL(dev)) {
I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
} else {
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPLINOFF(plane), linear_offset);
}
POSTING_READ(reg);
 
return 0;
2179,7 → 2276,6
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct drm_framebuffer *old_fb;
int ret;
2312,17 → 2408,28
FDI_FE_ERRC_ENABLE);
}
 
static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
static void ivb_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 flags = I915_READ(SOUTH_CHICKEN1);
struct intel_crtc *pipe_B_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
struct intel_crtc *pipe_C_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]);
uint32_t temp;
 
flags |= FDI_PHASE_SYNC_OVR(pipe);
I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
flags |= FDI_PHASE_SYNC_EN(pipe);
I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
POSTING_READ(SOUTH_CHICKEN1);
/* When everything is off, disable fdi C so that we can enable fdi B
* with all lanes. XXX: This misses the case where a pipe is not using
* any pch resources and so doesn't need any fdi lanes. */
if (!pipe_B_crtc->base.enabled && !pipe_C_crtc->base.enabled) {
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
 
temp = I915_READ(SOUTH_CHICKEN1);
temp &= ~FDI_BC_BIFURCATION_SELECT;
DRM_DEBUG_KMS("disabling fdi C rx\n");
I915_WRITE(SOUTH_CHICKEN1, temp);
}
}
 
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2367,11 → 2474,9
udelay(150);
 
/* Ironlake workaround, enable clock pointer after FDI enable*/
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
FDI_RX_PHASE_SYNC_POINTER_EN);
}
 
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
2460,6 → 2565,9
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
I915_WRITE(FDI_RX_MISC(pipe),
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
2474,9 → 2582,6
POSTING_READ(reg);
udelay(150);
 
if (HAS_PCH_CPT(dev))
cpt_phase_pointer_enable(dev, pipe);
 
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
2580,6 → 2685,9
POSTING_READ(reg);
udelay(150);
 
DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
I915_READ(FDI_RX_IIR(pipe)));
 
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
2592,6 → 2700,9
temp |= FDI_COMPOSITE_SYNC;
I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
I915_WRITE(FDI_RX_MISC(pipe),
FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
2603,9 → 2714,6
POSTING_READ(reg);
udelay(150);
 
if (HAS_PCH_CPT(dev))
cpt_phase_pointer_enable(dev, pipe);
 
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
2623,7 → 2731,7
if (temp & FDI_RX_BIT_LOCK ||
(I915_READ(reg) & FDI_RX_BIT_LOCK)) {
I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
DRM_DEBUG_KMS("FDI train 1 done.\n");
DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
break;
}
}
2664,7 → 2772,7
 
if (temp & FDI_RX_SYMBOL_LOCK) {
I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
DRM_DEBUG_KMS("FDI train 2 done.\n");
DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
break;
}
}
2681,9 → 2789,6
int pipe = intel_crtc->pipe;
u32 reg, temp;
 
/* Write the TU size bits so error detection works */
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
2747,17 → 2852,6
udelay(100);
}
 
static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 flags = I915_READ(SOUTH_CHICKEN1);
 
flags &= ~(FDI_PHASE_SYNC_EN(pipe));
I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
POSTING_READ(SOUTH_CHICKEN1);
}
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
2784,11 → 2878,6
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev)) {
I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe) &
~FDI_RX_PHASE_SYNC_POINTER_EN));
} else if (HAS_PCH_CPT(dev)) {
cpt_phase_pointer_disable(dev, pipe);
}
 
/* still set train pattern 1 */
2851,7 → 2940,7
}
#endif
 
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
static bool ironlake_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *intel_encoder;
2861,23 → 2950,6
* must be driven by its own crtc; no sharing is possible.
*/
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
 
/* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
* CPU handles all others */
if (IS_HASWELL(dev)) {
/* It is still unclear how this will work on PPT, so throw up a warning */
WARN_ON(!HAS_PCH_LPT(dev));
 
if (intel_encoder->type == INTEL_OUTPUT_ANALOG) {
DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
return true;
} else {
DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
intel_encoder->type);
return false;
}
}
 
switch (intel_encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&intel_encoder->base))
2889,6 → 2961,11
return true;
}
 
static bool haswell_crtc_driving_pch(struct drm_crtc *crtc)
{
return intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG);
}
 
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
2904,8 → 2981,9
 
/* Disable SSCCTL */
intel_sbi_write(dev_priv, SBI_SSCCTL6,
intel_sbi_read(dev_priv, SBI_SSCCTL6) |
SBI_SSCCTL_DISABLE);
intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK) |
SBI_SSCCTL_DISABLE,
SBI_ICLK);
 
/* 20MHz is a corner case which is out of range for the 7-bit divisor */
if (crtc->mode.clock == 20000) {
2946,7 → 3024,7
phaseinc);
 
/* Program SSCDIVINTPHASE6 */
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2953,26 → 3031,18
temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
 
intel_sbi_write(dev_priv,
SBI_SSCDIVINTPHASE6,
temp);
 
/* Program SSCAUXDIV */
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
intel_sbi_write(dev_priv,
SBI_SSCAUXDIV6,
temp);
intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
 
 
/* Enable modulator and associated divider */
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
temp &= ~SBI_SSCCTL_DISABLE;
intel_sbi_write(dev_priv,
SBI_SSCCTL6,
temp);
intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
 
/* Wait for initialization time */
udelay(24);
2998,15 → 3068,24
 
assert_transcoder_disabled(dev_priv, pipe);
 
/* Write the TU size bits before fdi link training, so that error
* detection works. */
I915_WRITE(FDI_RX_TUSIZE1(pipe),
I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
 
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc);
 
intel_enable_pch_pll(intel_crtc);
/* XXX: pch pll's can be enabled any time before we enable the PCH
* transcoder, and we actually should do this to not upset any PCH
* transcoder that already uses the clock when we share it.
*
* Note that enable_pch_pll tries to do the right thing, but get_pch_pll
* unconditionally resets the pll - we need that to have the right LVDS
* enable sequence. */
ironlake_enable_pch_pll(intel_crtc);
 
if (HAS_PCH_LPT(dev)) {
DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
lpt_program_iclkip(crtc);
} else if (HAS_PCH_CPT(dev)) {
if (HAS_PCH_CPT(dev)) {
u32 sel;
 
temp = I915_READ(PCH_DPLL_SEL);
3043,7 → 3122,6
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
 
if (!IS_HASWELL(dev))
intel_fdi_normal_train(crtc);
 
/* For PCH DP, enable TRANS_DP_CTL */
3076,17 → 3154,39
temp |= TRANS_DP_PORT_SEL_D;
break;
default:
DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
temp |= TRANS_DP_PORT_SEL_B;
break;
BUG();
}
 
I915_WRITE(reg, temp);
}
 
intel_enable_transcoder(dev_priv, pipe);
ironlake_enable_pch_transcoder(dev_priv, pipe);
}
 
static void lpt_pch_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 
assert_transcoder_disabled(dev_priv, TRANSCODER_A);
 
lpt_program_iclkip(crtc);
 
/* Set transcoder timing. */
I915_WRITE(_TRANS_HTOTAL_A, I915_READ(HTOTAL(cpu_transcoder)));
I915_WRITE(_TRANS_HBLANK_A, I915_READ(HBLANK(cpu_transcoder)));
I915_WRITE(_TRANS_HSYNC_A, I915_READ(HSYNC(cpu_transcoder)));
 
I915_WRITE(_TRANS_VTOTAL_A, I915_READ(VTOTAL(cpu_transcoder)));
I915_WRITE(_TRANS_VBLANK_A, I915_READ(VBLANK(cpu_transcoder)));
I915_WRITE(_TRANS_VSYNC_A, I915_READ(VSYNC(cpu_transcoder)));
I915_WRITE(_TRANS_VSYNCSHIFT_A, I915_READ(VSYNCSHIFT(cpu_transcoder)));
 
lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
 
static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
{
struct intel_pch_pll *pll = intel_crtc->pch_pll;
3177,16 → 3277,12
void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
int dslreg = PIPEDSL(pipe);
u32 temp;
 
temp = I915_READ(dslreg);
udelay(500);
if (wait_for(I915_READ(dslreg) != temp, 5)) {
/* Without this, mode sets may fail silently on FDI */
I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
udelay(250);
I915_WRITE(tc2reg, 0);
if (wait_for(I915_READ(dslreg) != temp, 5))
DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
}
3217,9 → 3313,12
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
 
is_pch_port = intel_crtc_driving_pch(crtc);
is_pch_port = ironlake_crtc_driving_pch(crtc);
 
if (is_pch_port) {
/* Note: FDI PLL enabling _must_ be done before we enable the
* cpu pipes, hence this is separate from all the other fdi/pch
* enabling. */
ironlake_fdi_pll_enable(intel_crtc);
} else {
assert_fdi_tx_disabled(dev_priv, pipe);
3232,11 → 3331,16
 
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
(intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev))
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
else
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
3277,6 → 3381,83
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
bool is_pch_port;
 
WARN_ON(!crtc->enabled);
 
if (intel_crtc->active)
return;
 
intel_crtc->active = true;
intel_update_watermarks(dev);
 
is_pch_port = haswell_crtc_driving_pch(crtc);
 
if (is_pch_port)
dev_priv->display.fdi_link_train(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->pre_enable)
encoder->pre_enable(encoder);
 
intel_ddi_enable_pipe_clock(intel_crtc);
 
/* Enable panel fitting for eDP */
if (dev_priv->pch_pf_size &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
/* Force use of hard-coded filter coefficients
* as some pre-programmed values are broken,
* e.g. x201.
*/
I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
PF_PIPE_SEL_IVB(pipe));
I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
}
 
/*
* On ILK+, the LUT must be loaded before the pipe is running but with
* clocks enabled.
*/
intel_crtc_load_lut(crtc);
 
intel_ddi_set_pipe_settings(crtc);
intel_ddi_enable_pipe_func(crtc);
 
intel_enable_pipe(dev_priv, pipe, is_pch_port);
intel_enable_plane(dev_priv, plane, pipe);
 
if (is_pch_port)
lpt_pch_enable(crtc);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
// intel_crtc_update_cursor(crtc, true);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->enable(encoder);
 
/*
* There seems to be a race in PCH platform hw (at least on some
* outputs) where an enabled pipe still completes any pageflip right
* away (as if the pipe is off) instead of waiting for vblank. As soon
* as the first vblank happened, everything works as expected. Hence just
* wait for one vblank before returning to avoid strange things
* happening.
*/
intel_wait_for_vblank(dev, intel_crtc->pipe);
}
 
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
3315,7 → 3496,7
 
ironlake_fdi_disable(crtc);
 
intel_disable_transcoder(dev_priv, pipe);
ironlake_disable_pch_transcoder(dev_priv, pipe);
 
if (HAS_PCH_CPT(dev)) {
/* disable TRANS_DP_CTL */
3357,6 → 3538,58
mutex_unlock(&dev->struct_mutex);
}
 
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_encoder *encoder;
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
bool is_pch_port;
 
if (!intel_crtc->active)
return;
 
is_pch_port = haswell_crtc_driving_pch(crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
encoder->disable(encoder);
 
 
intel_disable_plane(dev_priv, plane, pipe);
 
if (dev_priv->cfb_plane == plane)
intel_disable_fbc(dev);
 
intel_disable_pipe(dev_priv, pipe);
 
intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
 
/* Disable PF */
I915_WRITE(PF_CTL(pipe), 0);
I915_WRITE(PF_WIN_SZ(pipe), 0);
 
intel_ddi_disable_pipe_clock(intel_crtc);
 
for_each_encoder_on_crtc(dev, crtc, encoder)
if (encoder->post_disable)
encoder->post_disable(encoder);
 
if (is_pch_port) {
lpt_disable_pch_transcoder(dev_priv);
intel_ddi_fdi_disable(crtc);
}
 
intel_crtc->active = false;
intel_update_watermarks(dev);
 
mutex_lock(&dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
}
 
static void ironlake_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3363,6 → 3596,17
intel_put_pch_pll(intel_crtc);
}
 
static void haswell_crtc_off(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/* Stop saying we're using TRANSCODER_EDP because some other CRTC might
* start using it. */
intel_crtc->cpu_transcoder = intel_crtc->pipe;
 
intel_ddi_put_crtc_pll(crtc);
}
 
static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
{
if (!enable && intel_crtc->overlay) {
3861,7 → 4105,7
/* Use VBT settings if we have an eDP panel */
unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
if (edp_bpc < display_bpc) {
if (edp_bpc && edp_bpc < display_bpc) {
DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
display_bpc = edp_bpc;
}
4077,7 → 4321,7
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock, intel_clock_t *reduced_clock,
int refclk, int num_connectors)
int num_connectors)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
4085,10 → 4329,20
int pipe = intel_crtc->pipe;
u32 dpll, mdiv, pdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
bool is_hdmi;
bool is_sdvo;
u32 temp;
 
is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
dpll = DPLL_VGA_MODE_DIS;
dpll |= DPLL_EXT_BUFFER_ENABLE_VLV;
dpll |= DPLL_REFA_CLK_ENABLE_VLV;
dpll |= DPLL_INTEGRATED_CLOCK_VLV;
 
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
 
bestn = clock->n;
bestm1 = clock->m1;
bestm2 = clock->m2;
4095,12 → 4349,10
bestp1 = clock->p1;
bestp2 = clock->p2;
 
/* Enable DPIO clock input */
dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
I915_WRITE(DPLL(pipe), dpll);
POSTING_READ(DPLL(pipe));
 
/*
* In Valleyview, the PLL and lane counter programming registers are
* exposed through the DPIO interface.
*/
mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
mdiv |= ((bestn << DPIO_N_SHIFT));
4111,12 → 4363,13
 
intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
 
pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
pdiv = (1 << DPIO_REFSEL_OVERRIDE) | (5 << DPIO_PLL_MODESEL_SHIFT) |
(3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
(8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
(7 << DPIO_PLL_REFCLK_SEL_SHIFT) | (8 << DPIO_DRIVER_CTL_SHIFT) |
(5 << DPIO_CLK_BIAS_CTL_SHIFT);
intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
 
intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x005f003b);
 
dpll |= DPLL_VCO_ENABLE;
I915_WRITE(DPLL(pipe), dpll);
4124,20 → 4377,45
if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
DRM_ERROR("DPLL %d failed to lock\n", pipe);
 
if (is_hdmi) {
u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x620);
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
intel_dp_set_m_n(crtc, mode, adjusted_mode);
 
I915_WRITE(DPLL(pipe), dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(DPLL(pipe));
udelay(150);
 
temp = 0;
if (is_sdvo) {
temp = intel_mode_get_pixel_multiplier(adjusted_mode);
if (temp > 1)
temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
else
temp = 0;
 
}
I915_WRITE(DPLL_MD(pipe), temp);
POSTING_READ(DPLL_MD(pipe));
}
 
intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
/* Now program lane control registers */
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
temp = 0x1000C4;
if (pipe == 1)
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL1, temp);
}
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
temp = 0x1000C4;
if (pipe == 1)
temp |= (1 << 21);
intel_dpio_write(dev_priv, DPIO_DATA_CHANNEL2, temp);
}
}
 
static void i9xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *mode,
4152,6 → 4430,8
u32 dpll;
bool is_sdvo;
 
i9xx_update_pll_dividers(crtc, clock, reduced_clock);
 
is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
 
4252,7 → 4532,7
 
static void i8xx_update_pll(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
intel_clock_t *clock, intel_clock_t *reduced_clock,
int num_connectors)
{
struct drm_device *dev = crtc->dev;
4261,6 → 4541,8
int pipe = intel_crtc->pipe;
u32 dpll;
 
i9xx_update_pll_dividers(crtc, clock, reduced_clock);
 
dpll = DPLL_VGA_MODE_DIS;
 
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4310,6 → 4592,64
I915_WRITE(DPLL(pipe), dpll);
}
 
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
uint32_t vsyncshift;
 
if (!IS_GEN2(dev) && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
vsyncshift = adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal / 2;
} else {
vsyncshift = 0;
}
 
if (INTEL_INFO(dev)->gen > 3)
I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
 
I915_WRITE(HTOTAL(cpu_transcoder),
(adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(HBLANK(cpu_transcoder),
(adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
I915_WRITE(HSYNC(cpu_transcoder),
(adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
 
I915_WRITE(VTOTAL(cpu_transcoder),
(adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
I915_WRITE(VBLANK(cpu_transcoder),
(adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
I915_WRITE(VSYNC(cpu_transcoder),
(adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
 
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
* documented in the DDI_FUNC_CTL register description, EDP Input Select
* bits. */
if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
 
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
I915_WRITE(PIPESRC(pipe),
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
}
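
As a worked example of the "count minus one" encoding used above, take a 1920x1080 mode with crtc_htotal == 2200 (an assumed, typical value). The horizontal total register would then be written as:

	/* low word = active - 1, high word = total - 1 */
	HTOTAL = (1920 - 1) | ((2200 - 1) << 16)
	       = 0x077F | (0x0897 << 16)
	       = 0x0897077F

The hardware counts both fields inclusively, hence the minus one on each.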
 
static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
4323,7 → 4663,7
int plane = intel_crtc->plane;
int refclk, num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dspcntr, pipeconf, vsyncshift;
u32 dspcntr, pipeconf;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder;
4387,14 → 4727,14
if (is_sdvo && is_tv)
i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
 
i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
&reduced_clock : NULL);
 
if (IS_GEN2(dev))
i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
i8xx_update_pll(crtc, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
else if (IS_VALLEYVIEW(dev))
vlv_update_pll(crtc, mode,adjusted_mode, &clock, NULL,
refclk, num_connectors);
vlv_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
num_connectors);
else
i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
has_reduced_clock ? &reduced_clock : NULL,
4435,6 → 4775,14
}
}
 
if (IS_VALLEYVIEW(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
if (adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
PIPECONF_ENABLE |
I965_PIPECONF_ACTIVE;
}
}
 
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
 
4450,41 → 4798,13
 
pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (!IS_GEN2(dev) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
vsyncshift = adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2;
} else {
else
pipeconf |= PIPECONF_PROGRESSIVE;
vsyncshift = 0;
}
 
if (!IS_GEN3(dev))
I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(HBLANK(pipe),
(adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
I915_WRITE(HSYNC(pipe),
(adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
 
I915_WRITE(VTOTAL(pipe),
(adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
I915_WRITE(VBLANK(pipe),
(adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
I915_WRITE(VSYNC(pipe),
(adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
 
/* pipesrc and dspsize control the size that is scaled from,
* which should always be the user's requested size.
*/
4492,8 → 4812,6
((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(DSPPOS(plane), 0);
I915_WRITE(PIPESRC(pipe),
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
 
I915_WRITE(PIPECONF(pipe), pipeconf);
POSTING_READ(PIPECONF(pipe));
4511,10 → 4829,7
return ret;
}
 
/*
* Initialize reference clocks when the driver loads
*/
void ironlake_init_pch_refclk(struct drm_device *dev)
static void ironlake_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
4628,6 → 4943,182
}
}
 
/* Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O. */
static void lpt_init_pch_refclk(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
bool has_vga = false;
bool is_sdv = false;
u32 tmp;
 
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
switch (encoder->type) {
case INTEL_OUTPUT_ANALOG:
has_vga = true;
break;
}
}
 
if (!has_vga)
return;
 
/* XXX: Rip out SDV support once Haswell ships for real. */
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
is_sdv = true;
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_DISABLE;
tmp |= SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
udelay(24);
 
tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
 
if (!is_sdv) {
tmp = I915_READ(SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
 
if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
DRM_ERROR("FDI mPHY reset assert timeout\n");
 
tmp = I915_READ(SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
I915_WRITE(SOUTH_CHICKEN2, tmp);
 
if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0,
100))
DRM_ERROR("FDI mPHY reset de-assert timeout\n");
}
 
tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
tmp &= ~(0xFF << 24);
tmp |= (0x12 << 24);
intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
 
if (!is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x808C, SBI_MPHY);
tmp &= ~(0x3 << 6);
tmp |= (1 << 6) | (1 << 0);
intel_sbi_write(dev_priv, 0x808C, tmp, SBI_MPHY);
}
 
if (is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x800C, SBI_MPHY);
tmp |= 0x7FFF;
intel_sbi_write(dev_priv, 0x800C, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
tmp |= (1 << 11);
intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
 
if (is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x2038, SBI_MPHY);
tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
intel_sbi_write(dev_priv, 0x2038, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x2138, SBI_MPHY);
tmp |= (0x3F << 24) | (0xF << 20) | (0xF << 16);
intel_sbi_write(dev_priv, 0x2138, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x203C, SBI_MPHY);
tmp |= (0x3F << 8);
intel_sbi_write(dev_priv, 0x203C, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x213C, SBI_MPHY);
tmp |= (0x3F << 8);
intel_sbi_write(dev_priv, 0x213C, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
tmp |= (1 << 24) | (1 << 21) | (1 << 18);
intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
 
if (!is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
tmp &= ~(7 << 13);
tmp |= (5 << 13);
intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
}
 
tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
tmp &= ~0xFF;
tmp |= 0x1C;
intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
tmp &= ~(0xFF << 16);
tmp |= (0x1C << 16);
intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
 
if (!is_sdv) {
tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
tmp |= (1 << 27);
intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
tmp |= (1 << 27);
intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
 
tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
tmp &= ~(0xF << 28);
tmp |= (4 << 28);
intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
}
 
/* ULT uses SBI_GEN0, but ULT doesn't have VGA, so we don't care. */
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
tmp |= SBI_DBUFF0_ENABLE;
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
}
 
/*
* Initialize reference clocks when the driver loads
*/
void intel_init_pch_refclk(struct drm_device *dev)
{
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
else if (HAS_PCH_LPT(dev))
lpt_init_pch_refclk(dev);
}
 
static int ironlake_get_refclk(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
4684,8 → 5175,8
val |= PIPE_12BPC;
break;
default:
val |= PIPE_8BPC;
break;
/* Case prevented by intel_choose_pipe_bpp_dither. */
BUG();
}
 
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
4702,6 → 5193,31
POSTING_READ(PIPECONF(pipe));
}
 
static void haswell_set_pipeconf(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
bool dither)
{
struct drm_i915_private *dev_priv = crtc->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
uint32_t val;
 
val = I915_READ(PIPECONF(cpu_transcoder));
 
val &= ~(PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_MASK);
if (dither)
val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
 
val &= ~PIPECONF_INTERLACE_MASK_HSW;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
val |= PIPECONF_INTERLACED_ILK;
else
val |= PIPECONF_PROGRESSIVE;
 
I915_WRITE(PIPECONF(cpu_transcoder), val);
POSTING_READ(PIPECONF(cpu_transcoder));
}
 
static bool ironlake_compute_clocks(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock,
4765,74 → 5281,126
return true;
}
 
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t temp;
 
temp = I915_READ(SOUTH_CHICKEN1);
if (temp & FDI_BC_BIFURCATION_SELECT)
return;
 
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
 
temp |= FDI_BC_BIFURCATION_SELECT;
DRM_DEBUG_KMS("enabling fdi C rx\n");
I915_WRITE(SOUTH_CHICKEN1, temp);
POSTING_READ(SOUTH_CHICKEN1);
}
 
static bool ironlake_check_fdi_lanes(struct intel_crtc *intel_crtc)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *pipe_B_crtc =
to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]);
 
DRM_DEBUG_KMS("checking fdi config on pipe %i, lanes %i\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
if (intel_crtc->fdi_lanes > 4) {
DRM_DEBUG_KMS("invalid fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 4;
 
return false;
}
 
if (dev_priv->num_pipe == 2)
return true;
 
switch (intel_crtc->pipe) {
case PIPE_A:
return true;
case PIPE_B:
if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled &&
intel_crtc->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 2;
 
return false;
}
 
if (intel_crtc->fdi_lanes > 2)
WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT);
else
cpt_enable_fdi_bc_bifurcation(dev);
 
return true;
case PIPE_C:
if (!pipe_B_crtc->base.enabled || pipe_B_crtc->fdi_lanes <= 2) {
if (intel_crtc->fdi_lanes > 2) {
DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %i: %i lanes\n",
intel_crtc->pipe, intel_crtc->fdi_lanes);
/* Clamp lanes to avoid programming the hw with bogus values. */
intel_crtc->fdi_lanes = 2;
 
return false;
}
} else {
DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
return false;
}
 
cpt_enable_fdi_bc_bifurcation(dev);
 
return true;
default:
BUG();
}
}
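
Summarizing the constraints this function encodes (derived by the editor from the branches above, not from separate documentation):

/* On three-pipe PCH parts, FDI links B and C share one 4-lane pool:
 * pipe B alone may take all 4 lanes; once pipe C also runs, each link
 * is limited to 2 and FDI_BC_BIFURCATION_SELECT splits the pool.
 * Out-of-range requests are clamped and the modeset rejected. */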
 
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
/*
* Account for spread spectrum to avoid
* oversubscribing the link. Max center spread
* is 2.5%; use 5% for safety's sake.
*/
u32 bps = target_clock * bpp * 21 / 20;
return bps / (link_bw * 8) + 1;
}
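
A worked instance of the formula above, with editor-chosen numbers (1080p60-class timing on a 2.7 GHz DP link) rather than values from this driver:

/* 148.5 MHz pixel clock, 24 bpp, 270000 kHz link clock: */
u32 bps = 148500 * 24 * 21 / 20;    /* 3742200, i.e. payload plus 5% margin */
int lanes = bps / (270000 * 8) + 1; /* 3742200 / 2160000 + 1 = 2 lanes */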
 
static void ironlake_set_m_n(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *fb)
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
struct intel_encoder *encoder, *edp_encoder = NULL;
int ret;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct intel_encoder *intel_encoder, *edp_encoder = NULL;
struct fdi_m_n m_n = {0};
u32 temp;
int target_clock, pixel_multiplier, lane, link_bw, factor;
unsigned int pipe_bpp;
bool dither;
bool is_cpu_edp = false, is_pch_edp = false;
int target_clock, pixel_multiplier, lane, link_bw;
bool is_dp = false, is_cpu_edp = false;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
case INTEL_OUTPUT_ANALOG:
is_crt = true;
break;
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_dp = true;
if (intel_encoder_is_pch_edp(&encoder->base))
is_pch_edp = true;
else
if (!intel_encoder_is_pch_edp(&intel_encoder->base))
is_cpu_edp = true;
edp_encoder = encoder;
edp_encoder = intel_encoder;
break;
}
 
num_connectors++;
}
 
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
&has_reduced_clock, &reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
/* FDI link */
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
lane = 0;
4859,30 → 5427,10
else
target_clock = adjusted_mode->clock;
 
/* determine panel color depth */
dither = intel_choose_pipe_bpp_dither(crtc, fb, &pipe_bpp,
adjusted_mode);
if (is_lvds && dev_priv->lvds_dither)
dither = true;
if (!lane)
lane = ironlake_get_lanes_required(target_clock, link_bw,
intel_crtc->bpp);
 
if (pipe_bpp != 18 && pipe_bpp != 24 && pipe_bpp != 30 &&
pipe_bpp != 36) {
WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
pipe_bpp);
pipe_bpp = 24;
}
intel_crtc->bpp = pipe_bpp;
 
if (!lane) {
/*
* Account for spread spectrum to avoid
* oversubscribing the link. Max center spread
* is 2.5%; use 5% for safety's sake.
*/
u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
lane = bps / (link_bw * 8) + 1;
}
 
intel_crtc->fdi_lanes = lane;
 
if (pixel_multiplier > 1)
4890,11 → 5438,52
ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
&m_n);
 
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
I915_WRITE(PIPE_DATA_M1(cpu_transcoder), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
}
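
As a minimal sketch of the idea behind the M/N pair written above (an editor's assumption about what ironlake_compute_m_n produces, not that function's code; sketch_compute_m_n is a hypothetical name):

/* Reduce payload/link bandwidth to an M/N pair the hardware can pace by:
 * M/N = (pixel_clock * bpp) / (link_clock * lanes * 8). */
static void sketch_compute_m_n(unsigned int bpp, unsigned int lanes,
			       unsigned int pixel_clock_khz,
			       unsigned int link_clock_khz,
			       unsigned int *m, unsigned int *n)
{
	unsigned long long num = (unsigned long long)pixel_clock_khz * bpp;
	unsigned long long den = (unsigned long long)link_clock_khz * lanes * 8;

	/* Halve both terms until they fit the (assumed 24-bit) registers. */
	while (num > 0xffffff || den > 0xffffff) {
		num >>= 1;
		den >>= 1;
	}
	*m = num;
	*n = den;
}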
 
static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
struct drm_display_mode *adjusted_mode,
intel_clock_t *clock, u32 fp)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *intel_encoder;
uint32_t dpll;
int factor, pixel_multiplier, num_connectors = 0;
bool is_lvds = false, is_sdvo = false, is_tv = false;
bool is_dp = false, is_cpu_edp = false;
 
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
switch (intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_SDVO:
case INTEL_OUTPUT_HDMI:
is_sdvo = true;
if (intel_encoder->needs_tv_clock)
is_tv = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_dp = true;
if (!intel_encoder_is_pch_edp(&intel_encoder->base))
is_cpu_edp = true;
break;
}
 
num_connectors++;
}
 
/* Enable autotuning of the PLL clock (if permissible) */
factor = 21;
if (is_lvds) {
4905,7 → 5494,7
} else if (is_sdvo && is_tv)
factor = 20;
 
if (clock.m < factor * clock.n)
if (clock->m < factor * clock->n)
fp |= FP_CB_TUNE;
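
For concreteness, an editor-chosen instance of the inequality above:

/* With the default factor of 21, dividers m = 80 and n = 4 satisfy
 * 80 < 21 * 4 = 84, so FP_CB_TUNE is set; m = 90 with the same n
 * would leave it clear. */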
 
dpll = 0;
4915,7 → 5504,7
else
dpll |= DPLLB_MODE_DAC_SERIAL;
if (is_sdvo) {
int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
if (pixel_multiplier > 1) {
dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
4925,11 → 5514,11
dpll |= DPLL_DVO_HIGH_SPEED;
 
/* compute bitmask from p1 value */
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
 
switch (clock.p2) {
switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
4955,15 → 5544,79
else
dpll |= PLL_REF_INPUT_DREFCLK;
 
return dpll;
}
 
static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
u32 temp;
int ret;
bool dither, fdi_config_ok;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_dp = true;
if (!intel_encoder_is_pch_edp(&encoder->base))
is_cpu_edp = true;
break;
}
 
num_connectors++;
}
 
WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
"Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
 
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
&has_reduced_clock, &reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
/* determine panel color depth */
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
adjusted_mode);
if (is_lvds && dev_priv->lvds_dither)
dither = true;
 
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
 
dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock, fp);
 
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
 
/* CPU eDP is the only output that doesn't need a PCH PLL of its own on
* pre-Haswell/LPT generation */
if (HAS_PCH_LPT(dev)) {
DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
pipe);
} else if (!is_cpu_edp) {
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
if (!is_cpu_edp) {
struct intel_pch_pll *pll;
 
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
5049,55 → 5702,232
}
}
 
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
/* the chip adds 2 halflines automatically */
adjusted_mode->crtc_vtotal -= 1;
adjusted_mode->crtc_vblank_end -= 1;
I915_WRITE(VSYNCSHIFT(pipe),
adjusted_mode->crtc_hsync_start
- adjusted_mode->crtc_htotal/2);
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
/* Note, this also computes intel_crtc->fdi_lanes which is used below in
* ironlake_check_fdi_lanes. */
ironlake_set_m_n(crtc, mode, adjusted_mode);
 
fdi_config_ok = ironlake_check_fdi_lanes(intel_crtc);
 
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
 
intel_wait_for_vblank(dev, pipe);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
 
ret = intel_pipe_set_base(crtc, x, y, fb);
 
intel_update_watermarks(dev);
 
intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
 
return fdi_config_ok ? ret : -EINVAL;
}
 
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
struct drm_framebuffer *fb)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
int num_connectors = 0;
intel_clock_t clock, reduced_clock;
u32 dpll = 0, fp = 0, fp2 = 0;
bool ok, has_reduced_clock = false;
bool is_lvds = false, is_dp = false, is_cpu_edp = false;
struct intel_encoder *encoder;
u32 temp;
int ret;
bool dither;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
switch (encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_DISPLAYPORT:
is_dp = true;
break;
case INTEL_OUTPUT_EDP:
is_dp = true;
if (!intel_encoder_is_pch_edp(&encoder->base))
is_cpu_edp = true;
break;
}
 
num_connectors++;
}
 
if (is_cpu_edp)
intel_crtc->cpu_transcoder = TRANSCODER_EDP;
else
intel_crtc->cpu_transcoder = pipe;
 
/* We are not sure yet that this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
 
WARN(num_connectors != 1, "%d connectors attached to pipe %c\n",
num_connectors, pipe_name(pipe));
 
WARN_ON(I915_READ(PIPECONF(intel_crtc->cpu_transcoder)) &
(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE));
 
WARN_ON(I915_READ(DSPCNTR(plane)) & DISPLAY_PLANE_ENABLE);
 
if (!intel_ddi_pll_mode_set(crtc, adjusted_mode->clock))
return -EINVAL;
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ok = ironlake_compute_clocks(crtc, adjusted_mode, &clock,
&has_reduced_clock,
&reduced_clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
}
 
/* Ensure that the cursor is valid for the new mode before changing... */
// intel_crtc_update_cursor(crtc, true);
 
/* determine panel color depth */
dither = intel_choose_pipe_bpp_dither(crtc, fb, &intel_crtc->bpp,
adjusted_mode);
if (is_lvds && dev_priv->lvds_dither)
dither = true;
 
DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
drm_mode_debug_printmodeline(mode);
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (has_reduced_clock)
fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
reduced_clock.m2;
 
dpll = ironlake_compute_dpll(intel_crtc, adjusted_mode, &clock,
fp);
 
/* CPU eDP is the only output that doesn't need a PCH PLL of its
* own on pre-Haswell/LPT generation */
if (!is_cpu_edp) {
struct intel_pch_pll *pll;
 
pll = intel_get_pch_pll(intel_crtc, dpll, fp);
if (pll == NULL) {
DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
pipe);
return -EINVAL;
}
} else
intel_put_pch_pll(intel_crtc);
 
/* The LVDS pin pair needs to be on before the DPLLs are
* enabled. This is an exception to the general rule that
* mode_set doesn't turn things on.
*/
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
if (HAS_PCH_CPT(dev)) {
temp &= ~PORT_TRANS_SEL_MASK;
temp |= PORT_TRANS_SEL_CPT(pipe);
} else {
I915_WRITE(VSYNCSHIFT(pipe), 0);
if (pipe == 1)
temp |= LVDS_PIPEB_SELECT;
else
temp &= ~LVDS_PIPEB_SELECT;
}
 
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
((adjusted_mode->crtc_htotal - 1) << 16));
I915_WRITE(HBLANK(pipe),
(adjusted_mode->crtc_hblank_start - 1) |
((adjusted_mode->crtc_hblank_end - 1) << 16));
I915_WRITE(HSYNC(pipe),
(adjusted_mode->crtc_hsync_start - 1) |
((adjusted_mode->crtc_hsync_end - 1) << 16));
/* set the corresponding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
/* Set the B0-B3 data pairs corresponding to whether
* we're going to set the DPLLs for dual-channel mode or
* not.
*/
if (clock.p2 == 7)
temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
else
temp &= ~(LVDS_B0B3_POWER_UP |
LVDS_CLKB_POWER_UP);
 
I915_WRITE(VTOTAL(pipe),
(adjusted_mode->crtc_vdisplay - 1) |
((adjusted_mode->crtc_vtotal - 1) << 16));
I915_WRITE(VBLANK(pipe),
(adjusted_mode->crtc_vblank_start - 1) |
((adjusted_mode->crtc_vblank_end - 1) << 16));
I915_WRITE(VSYNC(pipe),
(adjusted_mode->crtc_vsync_start - 1) |
((adjusted_mode->crtc_vsync_end - 1) << 16));
/* It would be nice to set 24 vs 18-bit mode
* (LVDS_A3_POWER_UP) appropriately here, but we need to
* look more thoroughly into how panels behave in the
* two modes.
*/
temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
temp |= LVDS_HSYNC_POLARITY;
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
I915_WRITE(PCH_LVDS, temp);
}
}
 
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
if (is_dp && !is_cpu_edp) {
intel_dp_set_m_n(crtc, mode, adjusted_mode);
} else {
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
/* For non-DP output, clear any trans DP clock recovery
* setting.*/
I915_WRITE(TRANSDATA_M1(pipe), 0);
I915_WRITE(TRANSDATA_N1(pipe), 0);
I915_WRITE(TRANSDPLINK_M1(pipe), 0);
I915_WRITE(TRANSDPLINK_N1(pipe), 0);
}
}
 
intel_crtc->lowfreq_avail = false;
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (intel_crtc->pch_pll) {
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
 
/* Wait for the clocks to stabilize. */
POSTING_READ(intel_crtc->pch_pll->pll_reg);
udelay(150);
 
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
I915_WRITE(PIPESRC(pipe),
((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
}
 
I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
if (intel_crtc->pch_pll) {
if (is_lvds && has_reduced_clock && i915_powersave) {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
intel_crtc->lowfreq_avail = true;
} else {
I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
}
}
}
 
intel_set_pipe_timings(intel_crtc, mode, adjusted_mode);
 
if (!is_dp || is_cpu_edp)
ironlake_set_m_n(crtc, mode, adjusted_mode);
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
if (is_cpu_edp)
ironlake_set_pll_edp(crtc, adjusted_mode->clock);
 
ironlake_set_pipeconf(crtc, adjusted_mode, dither);
haswell_set_pipeconf(crtc, adjusted_mode, dither);
 
intel_wait_for_vblank(dev, pipe);
 
/* Set up the display plane register */
I915_WRITE(DSPCNTR(plane), DISPPLANE_GAMMA_ENABLE);
POSTING_READ(DSPCNTR(plane));
5119,6 → 5949,8
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_encoder_helper_funcs *encoder_funcs;
struct intel_encoder *encoder;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int ret;
5129,9 → 5961,21
x, y, fb);
drm_vblank_post_modeset(dev, pipe);
 
if (ret != 0)
return ret;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.base.id,
drm_get_encoder_name(&encoder->base),
mode->base.id, mode->name);
encoder_funcs = encoder->base.helper_private;
encoder_funcs->mode_set(&encoder->base, mode, adjusted_mode);
}
 
return 0;
}
 
static bool intel_eld_uptodate(struct drm_connector *connector,
int reg_eldv, uint32_t bits_eldv,
int reg_elda, uint32_t bits_elda,
5767,7 → 6611,7
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
 
// obj = i915_gem_alloc_object(dev,
// intel_framebuffer_size_for_mode(mode, bpp));
5897,7 → 6741,7
DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
goto fail;
return false;
}
 
if (!intel_set_mode(crtc, mode, 0, 0, fb)) {
5904,17 → 6748,12
DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
if (old->release_fb)
old->release_fb->funcs->destroy(old->release_fb);
goto fail;
return false;
}
 
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
return true;
fail:
connector->encoder = NULL;
encoder->crtc = NULL;
return false;
}
 
void intel_release_load_detect_pipe(struct drm_connector *connector,
6039,12 → 6878,12
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
struct drm_display_mode *mode;
int htot = I915_READ(HTOTAL(pipe));
int hsync = I915_READ(HSYNC(pipe));
int vtot = I915_READ(VTOTAL(pipe));
int vsync = I915_READ(VSYNC(pipe));
int htot = I915_READ(HTOTAL(cpu_transcoder));
int hsync = I915_READ(HSYNC(cpu_transcoder));
int vtot = I915_READ(VTOTAL(cpu_transcoder));
int vsync = I915_READ(VSYNC(cpu_transcoder));
 
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
6202,14 → 7041,19
{
struct intel_unpin_work *work =
container_of(__work, struct intel_unpin_work, work);
struct drm_device *dev = work->crtc->dev;
 
mutex_lock(&work->dev->struct_mutex);
mutex_lock(&dev->struct_mutex);
intel_unpin_fb_obj(work->old_fb_obj);
drm_gem_object_unreference(&work->pending_flip_obj->base);
drm_gem_object_unreference(&work->old_fb_obj->base);
 
intel_update_fbc(work->dev);
mutex_unlock(&work->dev->struct_mutex);
intel_update_fbc(dev);
mutex_unlock(&dev->struct_mutex);
 
BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0);
atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count);
 
kfree(work);
}
 
6220,8 → 7064,6
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
struct timeval tvbl;
unsigned long flags;
 
/* Ignore early vblank irqs */
6230,25 → 7072,23
 
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
 
/* Ensure we don't miss a work->pending update ... */
smp_rmb();
 
if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
spin_unlock_irqrestore(&dev->event_lock, flags);
return;
}
 
/* and that the unpin work is consistent wrt ->pending. */
smp_rmb();
 
intel_crtc->unpin_work = NULL;
 
if (work->event) {
e = work->event;
e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
if (work->event)
drm_send_vblank_event(dev, intel_crtc->pipe, work->event);
 
e->event.tv_sec = tvbl.tv_sec;
e->event.tv_usec = tvbl.tv_usec;
 
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
 
drm_vblank_put(dev, intel_crtc->pipe);
 
spin_unlock_irqrestore(&dev->event_lock, flags);
6257,10 → 7097,10
 
atomic_clear_mask(1 << intel_crtc->plane,
&obj->pending_flip.counter);
 
wake_up(&dev_priv->pending_flip_queue);
schedule_work(&work->work);
 
queue_work(dev_priv->wq, &work->work);
 
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
}
 
6287,16 → 7127,25
to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
unsigned long flags;
 
/* NB: An MMIO update of the plane base pointer will also
* generate a page-flip completion irq, i.e. every modeset
* is also accompanied by a spurious intel_prepare_page_flip().
*/
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
if ((++intel_crtc->unpin_work->pending) > 1)
DRM_ERROR("Prepared flip multiple times\n");
} else {
DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
}
if (intel_crtc->unpin_work)
atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
spin_unlock_irqrestore(&dev->event_lock, flags);
}
 
static inline void intel_mark_page_flip_active(struct intel_crtc *intel_crtc)
{
/* Ensure that the work item is consistent when activating it ... */
smp_wmb();
atomic_set(&intel_crtc->unpin_work->pending, INTEL_FLIP_PENDING);
/* and that it is marked active as soon as the irq could fire. */
smp_wmb();
}
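
Condensing the barrier discipline above and its reader in do_intel_finish_page_flip into one editor-drawn sketch (simplified, not additional driver code):

/*
 * flip queue (writer)                     flip irq (reader)
 * -------------------                     -----------------
 * emit flip commands to the ring          work = intel_crtc->unpin_work;
 * smp_wmb();                              smp_rmb();
 * atomic_set(&work->pending,              if (atomic_read(&work->pending) <
 *            INTEL_FLIP_PENDING);             INTEL_FLIP_COMPLETE)
 * smp_wmb();                                  return;   (flip not armed yet)
 */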
 
static int intel_gen2_queue_flip(struct drm_device *dev,
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
6330,6 → 7179,8
intel_ring_emit(ring, fb->pitches[0]);
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, 0); /* aux display base address, unused */
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
 
6370,6 → 7221,7
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, MI_NOOP);
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
 
6416,6 → 7268,8
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
 
6458,6 → 7312,8
pf = 0;
pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
intel_ring_emit(ring, pf | pipesrc);
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
 
6512,6 → 7368,8
intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, obj->gtt_offset + intel_crtc->dspaddr_offset);
intel_ring_emit(ring, (MI_NOOP));
 
intel_mark_page_flip_active(intel_crtc);
intel_ring_advance(ring);
return 0;
 
6560,7 → 7418,7
return -ENOMEM;
 
work->event = event;
work->dev = crtc->dev;
work->crtc = crtc;
intel_fb = to_intel_framebuffer(crtc->fb);
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
6585,6 → 7443,9
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
 
if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
flush_workqueue(dev_priv->wq);
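
A gloss on the throttle above, inferred by the editor from how unpin_work_count is raised below and dropped in intel_unpin_work_fn:

/* At most two flips may be outstanding per CRTC; a third request
 * waits here until the unpin worker has drained one. */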
 
ret = i915_mutex_lock_interruptible(dev);
if (ret)
goto cleanup;
6603,6 → 7464,7
* the flip occurs and the object is no longer visible.
*/
atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
atomic_inc(&intel_crtc->unpin_work_count);
 
ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
if (ret)
6617,6 → 7479,7
return 0;
 
cleanup_pending:
atomic_dec(&intel_crtc->unpin_work_count);
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
6914,7 → 7777,7
dev->mode_config.dpms_property;
 
connector->dpms = DRM_MODE_DPMS_ON;
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
dpms_property,
DRM_MODE_DPMS_ON);
 
7036,8 → 7899,6
struct drm_device *dev = crtc->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_display_mode *adjusted_mode, saved_mode, saved_hwmode;
struct drm_encoder_helper_funcs *encoder_funcs;
struct drm_encoder *encoder;
struct intel_crtc *intel_crtc;
unsigned disable_pipes, prepare_pipes, modeset_pipes;
bool ret = true;
7082,6 → 7943,9
* update the output configuration. */
intel_modeset_update_state(dev, prepare_pipes);
 
if (dev_priv->display.modeset_global_resources)
dev_priv->display.modeset_global_resources(dev);
 
/* Set up the DPLL and any encoders state that needs to adjust or depend
* on the DPLL.
*/
7091,19 → 7955,7
x, y, fb);
if (!ret)
goto done;
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 
if (encoder->crtc != &intel_crtc->base)
continue;
 
DRM_DEBUG_KMS("[ENCODER:%d:%s] set [MODE:%d:%s]\n",
encoder->base.id, drm_get_encoder_name(encoder),
mode->base.id, mode->name);
encoder_funcs = encoder->helper_private;
encoder_funcs->mode_set(encoder, mode, adjusted_mode);
}
}
 
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
for_each_intel_crtc_masked(dev, prepare_pipes, intel_crtc)
7280,10 → 8132,6
DRM_DEBUG_KMS("encoder changed, full mode switch\n");
config->mode_changed = true;
}
 
/* Disable all disconnected encoders. */
if (connector->base.status == connector_status_disconnected)
connector->new_encoder = NULL;
}
/* connector->new_encoder is now updated for all connectors. */
 
7441,6 → 8289,12
// .page_flip = intel_crtc_page_flip,
};
 
static void intel_cpu_pll_init(struct drm_device *dev)
{
if (IS_HASWELL(dev))
intel_ddi_pll_init(dev);
}
 
static void intel_pch_pll_init(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
7480,6 → 8334,7
/* Swap pipes & planes for FBC on pre-965 */
intel_crtc->pipe = pipe;
intel_crtc->plane = pipe;
intel_crtc->cpu_transcoder = pipe;
if (IS_MOBILE(dev) && IS_GEN3(dev)) {
DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
intel_crtc->plane = !pipe;
7493,11 → 8348,6
intel_crtc->bpp = 24; /* default for pre-Ironlake */
 
drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
 
DRM_DEBUG_KMS("CRTC %d mode %x FB %x enable %d\n",
intel_crtc->base.base.id, intel_crtc->base.mode,
intel_crtc->base.fb, intel_crtc->base.enabled);
 
}
 
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
7574,16 → 8424,8
I915_WRITE(PFIT_CONTROL, 0);
}
 
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
 
if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A);
 
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
intel_dp_init(dev, PCH_DP_D, PORT_D);
}
 
if (!(IS_HASWELL(dev) &&
(I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)))
intel_crt_init(dev);
 
if (IS_HASWELL(dev)) {
7607,7 → 8449,11
intel_ddi_init(dev, PORT_D);
} else if (HAS_PCH_SPLIT(dev)) {
int found;
dpd_is_edp = intel_dpd_is_edp(dev);
 
if (has_edp_a(dev))
intel_dp_init(dev, DP_A, PORT_A);
 
if (I915_READ(HDMIB) & PORT_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, PCH_SDVOB, true);
7626,11 → 8472,15
if (I915_READ(PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_C, PORT_C);
 
if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
 
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
if (I915_READ(DP_C) & DP_DETECTED)
intel_dp_init(dev, DP_C, PORT_C);
 
if (I915_READ(SDVOB) & PORT_DETECTED) {
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
7643,9 → 8493,6
if (I915_READ(SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, SDVOC, PORT_C);
 
/* Shares lanes with HDMI on SDVOC */
if (I915_READ(DP_C) & DP_DETECTED)
intel_dp_init(dev, DP_C, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
 
7699,8 → 8546,9
intel_encoder_clones(encoder);
}
 
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
ironlake_init_pch_refclk(dev);
intel_init_pch_refclk(dev);
 
drm_helper_move_panel_connectors_to_head(dev);
}
 
 
7717,33 → 8565,74
{
int ret;
 
if (obj->tiling_mode == I915_TILING_Y)
if (obj->tiling_mode == I915_TILING_Y) {
DRM_DEBUG("hardware does not support tiling Y\n");
return -EINVAL;
}
 
if (mode_cmd->pitches[0] & 63)
if (mode_cmd->pitches[0] & 63) {
DRM_DEBUG("pitch (%d) must be at least 64 byte aligned\n",
mode_cmd->pitches[0]);
return -EINVAL;
}
 
/* FIXME <= Gen4 stride limits are a bit unclear */
if (mode_cmd->pitches[0] > 32768) {
DRM_DEBUG("pitch (%d) must be at less than 32768\n",
mode_cmd->pitches[0]);
return -EINVAL;
}
 
if (obj->tiling_mode != I915_TILING_NONE &&
mode_cmd->pitches[0] != obj->stride) {
DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
mode_cmd->pitches[0], obj->stride);
return -EINVAL;
}
 
/* Reject formats not supported by any plane early. */
switch (mode_cmd->pixel_format) {
case DRM_FORMAT_RGB332:
case DRM_FORMAT_C8:
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
break;
case DRM_FORMAT_XRGB1555:
case DRM_FORMAT_ARGB1555:
if (INTEL_INFO(dev)->gen > 3) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
break;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
/* RGB formats are common across chipsets */
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
if (INTEL_INFO(dev)->gen < 4) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
break;
case DRM_FORMAT_YUYV:
case DRM_FORMAT_UYVY:
case DRM_FORMAT_YVYU:
case DRM_FORMAT_VYUY:
if (INTEL_INFO(dev)->gen < 5) {
DRM_DEBUG("invalid format: 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
break;
default:
DRM_DEBUG_KMS("unsupported pixel format %u\n",
mode_cmd->pixel_format);
DRM_DEBUG("unsupported pixel format 0x%08x\n", mode_cmd->pixel_format);
return -EINVAL;
}
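
Recapping the new-side checks in the switch above (editor's summary, no additional policy):

/* C8, RGB565, XRGB8888 and ARGB8888 pass on every generation;
 * XRGB1555/ARGB1555 only up to gen 3; XBGR/ABGR 8888 and all
 * 2101010 variants need gen 4+; packed YUV needs gen 5+. */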
 
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0)
return -EINVAL;
 
ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
7767,7 → 8656,13
struct drm_i915_private *dev_priv = dev->dev_private;
 
/* We always want a DPMS function */
if (HAS_PCH_SPLIT(dev)) {
if (IS_HASWELL(dev)) {
dev_priv->display.crtc_mode_set = haswell_crtc_mode_set;
dev_priv->display.crtc_enable = haswell_crtc_enable;
dev_priv->display.crtc_disable = haswell_crtc_disable;
dev_priv->display.off = haswell_crtc_off;
dev_priv->display.update_plane = ironlake_update_plane;
} else if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
dev_priv->display.crtc_enable = ironlake_crtc_enable;
dev_priv->display.crtc_disable = ironlake_crtc_disable;
7818,6 → 8713,8
/* FIXME: detect B0+ stepping and use auto training */
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
dev_priv->display.write_eld = ironlake_write_eld;
dev_priv->display.modeset_global_resources =
ivb_modeset_global_resources;
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
8031,6 → 8928,7
DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
 
intel_cpu_pll_init(dev);
intel_pch_pll_init(dev);
 
/* Just disable it once at startup */
8100,7 → 8998,7
u32 reg;
 
/* Clear any frame start delays used for debugging left by the BIOS */
reg = PIPECONF(crtc->pipe);
reg = PIPECONF(crtc->cpu_transcoder);
I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
 
/* We need to sanitize the plane -> pipe mapping first because this will
8219,7 → 9117,8
 
/* Scan out the current hw modeset state, sanitizes it and maps it into the drm
* and i915 state tracking structures. */
void intel_modeset_setup_hw_state(struct drm_device *dev)
void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore)
{
struct drm_i915_private *dev_priv = dev->dev_private;
enum pipe pipe;
8228,10 → 9127,35
struct intel_encoder *encoder;
struct intel_connector *connector;
 
if (IS_HASWELL(dev)) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 
if (tmp & TRANS_DDI_FUNC_ENABLE) {
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
case TRANS_DDI_EDP_INPUT_A_ONOFF:
pipe = PIPE_A;
break;
case TRANS_DDI_EDP_INPUT_B_ONOFF:
pipe = PIPE_B;
break;
case TRANS_DDI_EDP_INPUT_C_ONOFF:
pipe = PIPE_C;
break;
}
 
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
crtc->cpu_transcoder = TRANSCODER_EDP;
 
DRM_DEBUG_KMS("Pipe %c using transcoder EDP\n",
pipe_name(pipe));
}
}
 
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
 
tmp = I915_READ(PIPECONF(pipe));
tmp = I915_READ(PIPECONF(crtc->cpu_transcoder));
if (tmp & PIPECONF_ENABLE)
crtc->active = true;
else
8244,6 → 9168,9
crtc->active ? "enabled" : "disabled");
}
 
if (IS_HASWELL(dev))
intel_ddi_setup_hw_pll_state(dev);
 
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
pipe = 0;
8290,9 → 9217,21
intel_sanitize_crtc(crtc);
}
 
if (force_restore) {
for_each_pipe(pipe) {
crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
intel_set_mode(&crtc->base, &crtc->base.mode,
crtc->base.x, crtc->base.y, crtc->base.fb);
}
 
// i915_redisable_vga(dev);
} else {
intel_modeset_update_staged_output_state(dev);
}
 
intel_modeset_check_state(dev);
 
drm_mode_config_reset(dev);
}
 
void intel_modeset_gem_init(struct drm_device *dev)
8301,7 → 9240,7
 
// intel_setup_overlay(dev);
 
intel_modeset_setup_hw_state(dev);
intel_modeset_setup_hw_state(dev, false);
}
 
void intel_modeset_cleanup(struct drm_device *dev)
8422,6 → 9361,7
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_display_error_state *error;
enum transcoder cpu_transcoder;
int i;
 
error = kmalloc(sizeof(*error), GFP_ATOMIC);
8429,6 → 9369,8
return NULL;
 
for_each_pipe(i) {
cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
 
error->cursor[i].control = I915_READ(CURCNTR(i));
error->cursor[i].position = I915_READ(CURPOS(i));
error->cursor[i].base = I915_READ(CURBASE(i));
8443,14 → 9385,14
error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
}
 
error->pipe[i].conf = I915_READ(PIPECONF(i));
error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
error->pipe[i].source = I915_READ(PIPESRC(i));
error->pipe[i].htotal = I915_READ(HTOTAL(i));
error->pipe[i].hblank = I915_READ(HBLANK(i));
error->pipe[i].hsync = I915_READ(HSYNC(i));
error->pipe[i].vtotal = I915_READ(VTOTAL(i));
error->pipe[i].vblank = I915_READ(VBLANK(i));
error->pipe[i].vsync = I915_READ(VSYNC(i));
error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
}
 
return error;
/drivers/video/drm/i915/intel_dp.c
36,8 → 36,6
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_LINK_STATUS_SIZE 6
#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
 
/**
49,7 → 47,9
*/
static bool is_edp(struct intel_dp *intel_dp)
{
return intel_dp->base.type == INTEL_OUTPUT_EDP;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
 
/**
76,15 → 76,16
return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}
 
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
return container_of(encoder, struct intel_dp, base.base);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
 
return intel_dig_port->base.base.dev;
}
 
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_dp, base);
return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}
 
/**
106,8 → 107,6
return is_pch_edp(intel_dp);
}
 
static void intel_dp_start_link_train(struct intel_dp *intel_dp);
static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);
 
void
114,13 → 113,10
intel_edp_link_config(struct intel_encoder *intel_encoder,
int *lane_num, int *link_bw)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
*lane_num = intel_dp->lane_count;
if (intel_dp->link_bw == DP_LINK_BW_1_62)
*link_bw = 162000;
else if (intel_dp->link_bw == DP_LINK_BW_2_7)
*link_bw = 270000;
*link_bw = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
}
 
int
127,28 → 123,16
intel_edp_target_clock(struct intel_encoder *intel_encoder,
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
struct intel_connector *intel_connector = intel_dp->attached_connector;
 
if (intel_dp->panel_fixed_mode)
return intel_dp->panel_fixed_mode->clock;
if (intel_connector->panel.fixed_mode)
return intel_connector->panel.fixed_mode->clock;
else
return mode->clock;
}
 
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
switch (max_lane_count) {
case 1: case 2: case 4:
break;
default:
max_lane_count = 4;
}
return max_lane_count;
}
 
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
208,7 → 192,7
bool adjust_mode)
{
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
int max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
int max_rate, mode_rate;
 
mode_rate = intel_dp_link_required(mode->clock, 24);
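
The comparison built from these values is a plain bandwidth budget; a hedged recap of the helper semantics (standard DisplayPort 8b/10b accounting, assumed rather than quoted from this file):

/* mode_rate scales with pixel_clock * bpp (payload bits), max_rate with
 * max_link_clock * max_lanes * 8/10 (the 8b/10b-coded link capacity);
 * the mode is usable only while mode_rate <= max_rate. */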
234,12 → 218,14
struct drm_display_mode *mode)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
if (is_edp(intel_dp) && fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
 
if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
if (mode->vdisplay > fixed_mode->vdisplay)
return MODE_PANEL;
}
 
285,6 → 271,10
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t clkcfg;
 
/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
if (IS_VALLEYVIEW(dev))
return 200;
 
clkcfg = I915_READ(CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
310,7 → 300,7
 
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
318,7 → 308,7
 
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
327,7 → 317,7
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (!is_edp(intel_dp))
346,7 → 336,8
uint8_t *recv, int recv_size)
{
uint32_t output_reg = intel_dp->output_reg;
struct drm_device *dev = intel_dp->base.base.dev;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t ch_ctl = output_reg + 0x10;
uint32_t ch_data = ch_ctl + 4;
356,6 → 347,29
uint32_t aux_clock_divider;
int try, precharge;
 
if (IS_HASWELL(dev)) {
switch (intel_dig_port->port) {
case PORT_A:
ch_ctl = DPA_AUX_CH_CTL;
ch_data = DPA_AUX_CH_DATA1;
break;
case PORT_B:
ch_ctl = PCH_DPB_AUX_CH_CTL;
ch_data = PCH_DPB_AUX_CH_DATA1;
break;
case PORT_C:
ch_ctl = PCH_DPC_AUX_CH_CTL;
ch_data = PCH_DPC_AUX_CH_DATA1;
break;
case PORT_D:
ch_ctl = PCH_DPD_AUX_CH_CTL;
ch_data = PCH_DPD_AUX_CH_DATA1;
break;
default:
BUG();
}
}
 
intel_dp_check_edp(intel_dp);
/* The clock divider is based off the hrawclk,
* and would like to run at 2MHz. So, take the
365,12 → 379,16
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
if (IS_GEN6(dev) || IS_GEN7(dev))
if (IS_HASWELL(dev))
aux_clock_divider = intel_ddi_get_cdclk_freq(dev_priv) >> 1;
else if (IS_VALLEYVIEW(dev))
aux_clock_divider = 100;
else if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
aux_clock_divider = DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
else
aux_clock_divider = intel_hrawclk(dev) / 2;
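
A worked instance of the fallback branch, with an editor-chosen clock:

/* A 200 MHz hrawclk gives aux_clock_divider = 200 / 2 = 100; the AUX
 * engine divides its source clock by that and lands on the ~2 MHz bit
 * clock the comment above asks for. */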
 
642,9 → 660,6
return -EREMOTEIO;
}
 
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
 
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
struct intel_connector *intel_connector, const char *name)
670,7 → 685,7
return ret;
}
 
static bool
bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
677,15 → 692,18
{
struct drm_device *dev = encoder->dev;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_connector *intel_connector = intel_dp->attached_connector;
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp, mode_rate;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
intel_pch_panel_fitting(dev,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
}
 
762,21 → 780,23
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
struct intel_encoder *intel_encoder;
struct intel_dp *intel_dp;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int lane_count = 4;
struct intel_dp_m_n m_n;
int pipe = intel_crtc->pipe;
enum transcoder cpu_transcoder = intel_crtc->cpu_transcoder;
 
/*
* Find the lane count in the intel_encoder private
*/
for_each_encoder_on_crtc(dev, crtc, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_EDP)
{
lane_count = intel_dp->lane_count;
break;
791,17 → 811,25
intel_dp_compute_m_n(intel_crtc->bpp, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
 
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(TRANSDATA_M1(pipe),
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
if (IS_HASWELL(dev)) {
I915_WRITE(PIPE_DATA_M1(cpu_transcoder),
TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(cpu_transcoder), m_n.gmch_n);
I915_WRITE(PIPE_LINK_M1(cpu_transcoder), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(cpu_transcoder), m_n.link_n);
} else if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(TRANSDATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n);
I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m);
I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n);
} else if (IS_VALLEYVIEW(dev)) {
I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
} else {
I915_WRITE(PIPE_GMCH_DATA_M(pipe),
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
m_n.gmch_m);
TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n);
I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m);
I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n);
808,6 → 836,21
}
}
 
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
}
}
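
Where these bytes land is worth noting; the mapping below follows the DisplayPort DPCD layout (spec knowledge, not something this file states):

/* link_configuration[] mirrors the sink's link registers at DPCD 0x100:
 * byte 0 -> DP_LINK_BW_SET (0x100), byte 1 -> DP_LANE_COUNT_SET (0x101),
 * byte 8 -> DP_MAIN_LINK_CHANNEL_CODING_SET (0x108, ANSI 8B/10B). */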
 
static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
815,7 → 858,7
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
/*
860,21 → 903,12
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
intel_write_eld(encoder, adjusted_mode);
}
memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
intel_dp->link_configuration[0] = intel_dp->link_bw;
intel_dp->link_configuration[1] = intel_dp->lane_count;
intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
/*
* Check for DPCD version > 1.1 and enhanced framing support
*/
if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
(intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
}
 
intel_dp_init_link_config(intel_dp);
 
/* Split out the IBX/CPU vs CPT settings */
 
if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
if (is_cpu_edp(intel_dp) && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
931,7 → 965,7
u32 mask,
u32 value)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
 
DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
978,9 → 1012,9
return control;
}
 
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
1019,7 → 1053,7
 
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
1037,8 → 1071,18
}
}
 
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
// struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
// struct intel_dp, panel_vdd_work);
// struct drm_device *dev = intel_dp_to_dev(intel_dp);
//
// mutex_lock(&dev->mode_config.mutex);
// ironlake_panel_vdd_off_sync(intel_dp);
// mutex_unlock(&dev->mode_config.mutex);
}
 
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
if (!is_edp(intel_dp))
return;
1061,9 → 1105,9
}
}
 
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
1103,9 → 1147,9
}
}
 
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
1128,10 → 1172,12
ironlake_wait_panel_off(intel_dp);
}
 
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
u32 pp;
 
if (!is_edp(intel_dp))
1149,11 → 1195,13
pp |= EDP_BLC_ENABLE;
I915_WRITE(PCH_PP_CONTROL, pp);
POSTING_READ(PCH_PP_CONTROL);
 
intel_panel_enable_backlight(dev, pipe);
}
 
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp;
 
1160,6 → 1208,8
if (!is_edp(intel_dp))
return;
 
intel_panel_disable_backlight(dev);
 
DRM_DEBUG_KMS("\n");
pp = ironlake_get_pp_control(dev_priv);
pp &= ~EDP_BLC_ENABLE;
1170,8 → 1220,9
 
static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
1195,8 → 1246,9
 
static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpa_ctl;
 
1218,7 → 1270,7
}
 
/* If the sink supports it, try to set the power state appropriately */
static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
int ret, i;
 
1288,10 → 1340,11
return true;
}
}
 
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
intel_dp->output_reg);
}
 
DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);
 
return true;
}
 
1386,38 → 1439,6
DP_LINK_STATUS_SIZE);
}
 
static uint8_t
intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int r)
{
return link_status[r - DP_LANE0_1_STATUS];
}
 
static uint8_t
intel_get_adjust_request_voltage(uint8_t adjust_request[2],
int lane)
{
int s = ((lane & 1) ?
DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
uint8_t l = adjust_request[lane>>1];
 
return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}
 
static uint8_t
intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
int lane)
{
int s = ((lane & 1) ?
DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
uint8_t l = adjust_request[lane>>1];
 
return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}
 
 
#if 0
static char *voltage_names[] = {
"0.4V", "0.6V", "0.8V", "1.2V"
1438,7 → 1459,7
static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
1451,12 → 1472,24
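/* Given a voltage swing, return the highest pre-emphasis level the
* hardware can pair with it; swing and pre-emphasis trade off, so the
* larger the swing the less pre-emphasis headroom remains. */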
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
 
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
if (IS_HASWELL(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_9_5;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
1484,13 → 1517,12
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
uint8_t voltage_max;
uint8_t preemph_max;
 
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
 
if (this_v > v)
v = this_v;
1607,64 → 1639,88
}
}
 
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
static uint32_t
intel_dp_signal_levels_hsw(uint8_t train_set)
{
int s = (lane & 1) * 4;
uint8_t l = link_status[lane>>1];
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_400MV_0DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_400MV_3_5DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_400MV_6DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
return DDI_BUF_EMP_400MV_9_5DB_HSW;
 
return (l >> s) & 0xf;
}
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_600MV_0DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_600MV_3_5DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
return DDI_BUF_EMP_600MV_6DB_HSW;
 
/* Check for clock recovery is done on all channels */
static bool
intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
{
int lane;
uint8_t lane_status;
 
for (lane = 0; lane < lane_count; lane++) {
lane_status = intel_get_lane_status(link_status, lane);
if ((lane_status & DP_LANE_CR_DONE) == 0)
return false;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return DDI_BUF_EMP_800MV_0DB_HSW;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return DDI_BUF_EMP_800MV_3_5DB_HSW;
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return DDI_BUF_EMP_400MV_0DB_HSW;
}
return true;
}
 
/* Check to see if channel eq is done on all channels */
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
DP_LANE_CHANNEL_EQ_DONE|\
DP_LANE_SYMBOL_LOCKED)
static bool
intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
uint8_t lane_align;
uint8_t lane_status;
int lane;
 
lane_align = intel_dp_link_status(link_status,
DP_LANE_ALIGN_STATUS_UPDATED);
if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
return false;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
lane_status = intel_get_lane_status(link_status, lane);
if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
return false;
}
return true;
}
 
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_dig_port->port;
int ret;
uint32_t temp;
 
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
if (IS_HASWELL(dev)) {
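/* Haswell selects the training pattern through DP_TP_CTL rather than
* the DP port register. Note the DISABLE case below: the hardware
* must finish sending idle patterns (DP_TP_STATUS_IDLE_DONE) before
* the transport may be switched back to normal video output. */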
temp = I915_READ(DP_TP_CTL(port));
 
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
else
temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
 
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
I915_WRITE(DP_TP_CTL(port), temp);
 
if (wait_for((I915_READ(DP_TP_STATUS(port)) &
DP_TP_STATUS_IDLE_DONE), 1))
DRM_ERROR("Timed out waiting for DP idle patterns\n");
 
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
 
break;
case DP_TRAINING_PATTERN_1:
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
break;
case DP_TRAINING_PATTERN_2:
temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
break;
case DP_TRAINING_PATTERN_3:
temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
break;
}
I915_WRITE(DP_TP_CTL(port), temp);
 
} else if (HAS_PCH_CPT(dev) &&
(IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
 
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1724,10 → 1780,11
}
 
/* Enable corresponding port and start training pattern 1 */
static void
void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
struct drm_device *dev = encoder->dev;
int i;
uint8_t voltage;
bool clock_recovery = false;
1734,6 → 1791,9
int voltage_tries, loop_tries;
uint32_t DP = intel_dp->DP;
 
if (IS_HASWELL(dev))
intel_ddi_prepare_link_retrain(encoder);
 
/* Write the link configuration data */
intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
intel_dp->link_configuration,
1751,8 → 1811,11
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
 
 
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
if (IS_HASWELL(dev)) {
signal_levels = intel_dp_signal_levels_hsw(
intel_dp->train_set[0]);
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1760,23 → 1823,24
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n",
signal_levels);
 
/* Set training pattern 1 */
if (!intel_dp_set_link_train(intel_dp, DP,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE))
break;
/* Set training pattern 1 */
 
udelay(100);
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
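/* Honour the interval the sink advertises in
* DP_TRAINING_AUX_RD_INTERVAL before polling the lane status. */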
if (!intel_dp_get_link_status(intel_dp, link_status)) {
DRM_ERROR("failed to get link status\n");
break;
}
 
if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("clock recovery OK\n");
clock_recovery = true;
break;
1815,10 → 1879,10
intel_dp->DP = DP;
}
 
static void
void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
bool channel_eq = false;
int tries, cr_tries;
uint32_t DP = intel_dp->DP;
1838,7 → 1902,10
break;
}
 
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
if (IS_HASWELL(dev)) {
signal_levels = intel_dp_signal_levels_hsw(intel_dp->train_set[0]);
DP = (DP & ~DDI_BUF_EMP_MASK) | signal_levels;
} else if (IS_GEN7(dev) && is_cpu_edp(intel_dp) && !IS_VALLEYVIEW(dev)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1855,18 → 1922,18
DP_LINK_SCRAMBLING_DISABLE))
break;
 
udelay(400);
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status))
break;
 
/* Make sure clock is still ok */
if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
intel_dp_start_link_train(intel_dp);
cr_tries++;
continue;
}
 
if (intel_channel_eq_ok(intel_dp, link_status)) {
if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
channel_eq = true;
break;
}
1885,6 → 1952,9
++tries;
}
 
if (channel_eq)
DRM_DEBUG_KMS("Channel EQ done. DP Training successfull\n");
 
intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}
 
1891,10 → 1961,29
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct drm_device *dev = intel_dig_port->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
 
/*
* DDI code has a strict mode set sequence and we should try to respect
* it, otherwise we might hang the machine in many different ways. So we
* really should only be disabling the port on a complete crtc_disable
* sequence. This function is only called under two conditions in the
* DDI code:
* - Link training failed while doing crtc_enable; in this case we
*   really should respect the mode set sequence and wait for a
*   crtc_disable.
* - Someone turned the monitor off and intel_dp_check_link_status
*   called us. We don't need to disable the whole port in this case, so
*   when someone turns the monitor on again,
*   intel_ddi_prepare_link_retrain will take care of redoing the link
*   training.
*/
if (IS_HASWELL(dev))
return;
 
if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
return;
 
1913,7 → 2002,7
 
if (HAS_PCH_IBX(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
 
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
2014,7 → 2103,7
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
/* NAK by default */
intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
}
 
/*
2026,16 → 2115,17
* 4. Check link status on receipt of hot-plug interrupt
*/
 
static void
void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
u8 sink_irq_vector;
u8 link_status[DP_LINK_STATUS_SIZE];
 
if (!intel_dp->base.connectors_active)
if (!intel_encoder->connectors_active)
return;
 
if (WARN_ON(!intel_dp->base.base.crtc))
if (WARN_ON(!intel_encoder->base.crtc))
return;
 
/* Try to read receiver status if the link appears to be up */
2064,9 → 2154,9
DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
}
 
if (!intel_channel_eq_ok(intel_dp, link_status)) {
if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
drm_get_encoder_name(&intel_dp->base.base));
drm_get_encoder_name(&intel_encoder->base));
intel_dp_start_link_train(intel_dp);
intel_dp_complete_link_train(intel_dp);
}
2115,11 → 2205,12
static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
enum drm_connector_status status;
 
/* Can't disconnect eDP, but you can close the lid... */
if (is_edp(intel_dp)) {
status = intel_panel_detect(intel_dp->base.base.dev);
status = intel_panel_detect(dev);
if (status == connector_status_unknown)
status = connector_status_connected;
return status;
2131,7 → 2222,7
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
 
2158,44 → 2249,45
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
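 
/* For eDP the EDID is cached once at init time: either a valid EDID,
* or an ERR_PTR when the probe failed or returned a corrupt EDID.
* NULL means no EDID has been cached (non-eDP). */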
 
/* use cached edid if we have one */
if (intel_connector->edid) {
struct edid *edid;
int size;
 
if (is_edp(intel_dp)) {
if (!intel_dp->edid)
/* invalid edid */
if (IS_ERR(intel_connector->edid))
return NULL;
 
size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
edid = kmalloc(size, GFP_KERNEL);
if (!edid)
return NULL;
 
memcpy(edid, intel_dp->edid, size);
memcpy(edid, intel_connector->edid, size);
return edid;
}
 
edid = drm_get_edid(connector, adapter);
return edid;
return drm_get_edid(connector, adapter);
}
 
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
int ret;
struct intel_connector *intel_connector = to_intel_connector(connector);
 
if (is_edp(intel_dp)) {
drm_mode_connector_update_edid_property(connector,
intel_dp->edid);
ret = drm_add_edid_modes(connector, intel_dp->edid);
drm_edid_to_eld(connector,
intel_dp->edid);
return intel_dp->edid_mode_count;
/* use cached edid if we have one */
if (intel_connector->edid) {
/* invalid edid */
if (IS_ERR(intel_connector->edid))
return 0;
 
return intel_connector_update_modes(connector,
intel_connector->edid);
}
 
ret = intel_ddc_get_modes(connector, adapter);
return ret;
return intel_ddc_get_modes(connector, adapter);
}
 
 
2209,9 → 2301,12
intel_dp_detect(struct drm_connector *connector, bool force)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = intel_dp->base.base.dev;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = connector->dev;
enum drm_connector_status status;
struct edid *edid = NULL;
// char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
 
intel_dp->has_audio = false;
 
2220,10 → 2315,9
else
status = g4x_dp_detect(intel_dp);
 
DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
intel_dp->dpcd[6], intel_dp->dpcd[7]);
// hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
// 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
// DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
if (status != connector_status_connected)
return status;
2230,9 → 2324,8
 
intel_dp_probe_oui(intel_dp);
 
/*
if (intel_dp->force_audio) {
intel_dp->has_audio = intel_dp->force_audio > 0;
if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
} else {
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
2240,7 → 2333,9
kfree(edid);
}
}
*/
 
if (intel_encoder->type != INTEL_OUTPUT_EDP)
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
return connector_status_connected;
}
 
2247,8 → 2342,8
static int intel_dp_get_modes(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = intel_dp->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
int ret;
 
/* We should parse the EDID data and find out if it has an audio sink
2255,35 → 2350,15
*/
 
ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
if (ret) {
if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
struct drm_display_mode *newmode;
list_for_each_entry(newmode, &connector->probed_modes,
head) {
if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
intel_dp->panel_fixed_mode =
drm_mode_duplicate(dev, newmode);
break;
}
}
}
if (ret)
return ret;
}
 
/* if eDP has no EDID, try to use fixed panel mode from VBT */
if (is_edp(intel_dp)) {
/* initialize panel mode from VBT if available for eDP */
if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
intel_dp->panel_fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (intel_dp->panel_fixed_mode) {
intel_dp->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
}
}
if (intel_dp->panel_fixed_mode) {
/* if eDP has no EDID, fall back to fixed mode */
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
mode = drm_mode_duplicate(dev,
intel_connector->panel.fixed_mode);
if (mode) {
drm_mode_probed_add(connector, mode);
return 1;
}
2291,10 → 2366,22
return 0;
}
 
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct edid *edid;
bool has_audio = false;
 
edid = intel_dp_get_edid(connector, &intel_dp->adapter);
if (edid) {
has_audio = drm_detect_monitor_audio(edid);
kfree(edid);
}
 
return has_audio;
}
 
 
static int
intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
2301,10 → 2388,12
uint64_t val)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
int ret;
 
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
#if 0
2338,11 → 2427,27
}
#endif
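 
/* The scaling mode property only applies to the built-in eDP panel,
* and DRM_MODE_SCALE_NONE is not supported by this code path. */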
 
if (is_edp(intel_dp) &&
property == connector->dev->mode_config.scaling_mode_property) {
if (val == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
return -EINVAL;
}
 
if (intel_connector->panel.fitting_mode == val) {
/* the eDP scaling property is not changed */
return 0;
}
intel_connector->panel.fitting_mode = val;
 
goto done;
}
 
return -EINVAL;
 
done:
if (intel_dp->base.base.crtc) {
struct drm_crtc *crtc = intel_dp->base.base.crtc;
if (intel_encoder->base.crtc) {
struct drm_crtc *crtc = intel_encoder->base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
2355,9 → 2460,15
{
struct drm_device *dev = connector->dev;
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
 
if (is_edp(intel_dp))
if (!IS_ERR_OR_NULL(intel_connector->edid))
kfree(intel_connector->edid);
 
if (is_edp(intel_dp)) {
intel_panel_destroy_backlight(dev);
intel_panel_fini(&intel_connector->panel);
}
 
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
2364,17 → 2475,18
kfree(connector);
}
 
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
 
// i2c_del_adapter(&intel_dp->adapter);
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
// cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
ironlake_panel_vdd_off_sync(intel_dp);
}
kfree(intel_dp);
kfree(intel_dig_port);
}
 
static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
2404,7 → 2516,7
static void
intel_dp_hot_plug(struct intel_encoder *intel_encoder)
{
struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);
struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
intel_dp_check_link_status(intel_dp);
}
2414,13 → 2526,14
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct intel_encoder *encoder;
struct intel_encoder *intel_encoder;
struct intel_dp *intel_dp;
 
for_each_encoder_on_crtc(dev, crtc, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
intel_dp = enc_to_intel_dp(&intel_encoder->base);
 
if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
intel_dp->base.type == INTEL_OUTPUT_EDP)
if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
intel_encoder->type == INTEL_OUTPUT_EDP)
return intel_dp->output_reg;
}
 
2450,79 → 2563,205
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
struct intel_connector *intel_connector = to_intel_connector(connector);
 
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
 
if (is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);
drm_object_attach_property(
&connector->base,
connector->dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
}
}
 
static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *out)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct edp_power_seq cur, vbt, spec, final;
u32 pp_on, pp_off, pp_div, pp;
 
/* Workaround: Need to write PP_CONTROL with the unlock key as
* the very first thing. */
pp = ironlake_get_pp_control(dev_priv);
I915_WRITE(PCH_PP_CONTROL, pp);
 
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
 
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
 
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
PANEL_LIGHT_ON_DELAY_SHIFT;
 
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
PANEL_LIGHT_OFF_DELAY_SHIFT;
 
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
 
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
 
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
 
vbt = dev_priv->edp.pps;
 
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
spec.t1_t3 = 210 * 10;
spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
spec.t10 = 500 * 10;
/* This one is special and actually in units of 100ms, but zero
* based in the hw (so we need to add 100 ms). But the sw vbt
* table multiplies it by 1000 to make it in units of 100usec,
* too. */
spec.t11_t12 = (510 + 100) * 10;
 
DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
 
/* Use the max of the register settings and vbt. If both are
* unset, fall back to the spec limits. */
#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
spec.field : \
max(cur.field, vbt.field))
assign_final(t1_t3);
assign_final(t8);
assign_final(t9);
assign_final(t10);
assign_final(t11_t12);
#undef assign_final
 
#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
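/* The register fields are in 100 us units, so dividing by 10 yields
* milliseconds: e.g. a final t1_t3 of 2100 (210.0 ms) becomes a
* panel_power_up_delay of 210. */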
intel_dp->panel_power_up_delay = get_delay(t1_t3);
intel_dp->backlight_on_delay = get_delay(t8);
intel_dp->backlight_off_delay = get_delay(t9);
intel_dp->panel_power_down_delay = get_delay(t10);
intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
 
DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
intel_dp->panel_power_cycle_delay);
 
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
 
if (out)
*out = final;
}
 
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
struct intel_dp *intel_dp,
struct edp_power_seq *seq)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_on, pp_off, pp_div;
 
/* And finally store the new values in the power sequencer. */
pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
(seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
(seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
/* Compute the divisor for the pp clock, simply matching the Bspec
* formula. */
pp_div = ((100 * intel_pch_rawclk(dev))/2 - 1)
<< PP_REFERENCE_DIVIDER_SHIFT;
pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
<< PANEL_POWER_CYCLE_DELAY_SHIFT);
 
/* Haswell doesn't have any port selection bits for the panel
* power sequencer any more. */
if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
if (is_cpu_edp(intel_dp))
pp_on |= PANEL_POWER_PORT_DP_A;
else
pp_on |= PANEL_POWER_PORT_DP_D;
}
 
I915_WRITE(PCH_PP_ON_DELAYS, pp_on);
I915_WRITE(PCH_PP_OFF_DELAYS, pp_off);
I915_WRITE(PCH_PP_DIVISOR, pp_div);
 
DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
I915_READ(PCH_PP_ON_DELAYS),
I915_READ(PCH_PP_OFF_DELAYS),
I915_READ(PCH_PP_DIVISOR));
}
 
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_dp *intel_dp;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct drm_display_mode *fixed_mode = NULL;
struct edp_power_seq power_seq = { 0 };
enum port port = intel_dig_port->port;
const char *name = NULL;
int type;
 
intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL);
if (!intel_dp)
return;
 
intel_dp->output_reg = output_reg;
intel_dp->port = port;
/* Preserve the current hw state. */
intel_dp->DP = I915_READ(intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dp);
return;
}
intel_encoder = &intel_dp->base;
 
if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D)
if (HAS_PCH_SPLIT(dev) && port == PORT_D)
if (intel_dpd_is_edp(dev))
intel_dp->is_pch_edp = true;
 
if (output_reg == DP_A || is_pch_edp(intel_dp)) {
/*
* FIXME: We need to initialize built-in panels before external panels.
* For X0, DP_C is fixed as eDP. Revisit this as part of VLV eDP cleanup
*/
if (IS_VALLEYVIEW(dev) && port == PORT_C) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else if (port == PORT_A || is_pch_edp(intel_dp)) {
type = DRM_MODE_CONNECTOR_eDP;
intel_encoder->type = INTEL_OUTPUT_EDP;
} else {
/* The intel_encoder->type value may be INTEL_OUTPUT_UNKNOWN for
* DDI or INTEL_OUTPUT_DISPLAYPORT for the older gens, so don't
* rewrite it.
*/
type = DRM_MODE_CONNECTOR_DisplayPort;
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
}
 
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
connector->polled = DRM_CONNECTOR_POLL_HPD;
 
intel_encoder->cloneable = false;
 
// INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
// ironlake_panel_vdd_work);
 
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
ironlake_panel_vdd_work);
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
drm_sysfs_connector_add(connector);
 
intel_encoder->enable = intel_enable_dp;
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
if (IS_HASWELL(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 
/* Set up the DDC bus. */
switch (port) {
case PORT_A:
2545,66 → 2784,15
break;
}
 
/* Cache some DPCD data in the eDP case */
if (is_edp(intel_dp)) {
struct edp_power_seq cur, vbt;
u32 pp_on, pp_off, pp_div;
if (is_edp(intel_dp))
intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
 
pp_on = I915_READ(PCH_PP_ON_DELAYS);
pp_off = I915_READ(PCH_PP_OFF_DELAYS);
pp_div = I915_READ(PCH_PP_DIVISOR);
 
if (!pp_on || !pp_off || !pp_div) {
DRM_INFO("bad panel power sequencing delays, disabling panel\n");
intel_dp_encoder_destroy(&intel_dp->base.base);
intel_dp_destroy(&intel_connector->base);
return;
}
 
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
 
cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
PANEL_LIGHT_ON_DELAY_SHIFT;
 
cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
PANEL_LIGHT_OFF_DELAY_SHIFT;
 
cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
PANEL_POWER_DOWN_DELAY_SHIFT;
 
cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
 
DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
 
vbt = dev_priv->edp.pps;
 
DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
 
#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10)
 
intel_dp->panel_power_up_delay = get_delay(t1_t3);
intel_dp->backlight_on_delay = get_delay(t8);
intel_dp->backlight_off_delay = get_delay(t9);
intel_dp->panel_power_down_delay = get_delay(t10);
intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
 
DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
intel_dp->panel_power_cycle_delay);
 
DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
}
 
intel_dp_i2c_init(intel_dp, intel_connector, name);
 
/* Cache DPCD and EDID for eDP. */
if (is_edp(intel_dp)) {
bool ret;
struct drm_display_mode *scan;
struct edid *edid;
 
ironlake_edp_panel_vdd_on(intel_dp);
2619,29 → 2807,51
} else {
/* if this fails, presume the device is a ghost */
DRM_INFO("failed to retrieve link info, disabling eDP\n");
intel_dp_encoder_destroy(&intel_dp->base.base);
intel_dp_destroy(&intel_connector->base);
intel_dp_encoder_destroy(&intel_encoder->base);
intel_dp_destroy(connector);
return;
}
 
/* We now know it's not a ghost, init power sequence regs. */
intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
&power_seq);
 
ironlake_edp_panel_vdd_on(intel_dp);
edid = drm_get_edid(connector, &intel_dp->adapter);
if (edid) {
drm_mode_connector_update_edid_property(connector,
edid);
intel_dp->edid_mode_count =
drm_add_edid_modes(connector, edid);
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_edid_to_eld(connector, edid);
intel_dp->edid = edid;
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
}
} else {
edid = ERR_PTR(-ENOENT);
}
intel_connector->edid = edid;
 
/* prefer fixed mode from EDID if available */
list_for_each_entry(scan, &connector->probed_modes, head) {
if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
fixed_mode = drm_mode_duplicate(dev, scan);
break;
}
}
 
/* fallback to VBT if available for eDP */
if (!fixed_mode && dev_priv->lfp_lvds_vbt_mode) {
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (fixed_mode)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
 
ironlake_edp_panel_vdd_off(intel_dp, false);
}
 
intel_encoder->hot_plug = intel_dp_hot_plug;
 
if (is_edp(intel_dp)) {
dev_priv->int_edp_connector = connector;
intel_panel_setup_backlight(dev);
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
}
 
intel_dp_add_properties(intel_dp, connector);
2655,3 → 2865,45
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
 
void
intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
 
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
if (!intel_dig_port)
return;
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
}
 
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
 
intel_encoder->enable = intel_enable_dp;
intel_encoder->pre_enable = intel_pre_enable_dp;
intel_encoder->disable = intel_disable_dp;
intel_encoder->post_disable = intel_post_disable_dp;
intel_encoder->get_hw_state = intel_dp_get_hw_state;
 
intel_dig_port->port = port;
intel_dig_port->dp.output_reg = output_reg;
 
intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
intel_encoder->hot_plug = intel_dp_hot_plug;
 
intel_dp_init_connector(intel_dig_port, intel_connector);
}
/drivers/video/drm/i915/intel_drv.h
103,26 → 103,8
#define INTEL_OUTPUT_HDMI 6
#define INTEL_OUTPUT_DISPLAYPORT 7
#define INTEL_OUTPUT_EDP 8
#define INTEL_OUTPUT_UNKNOWN 9
 
/* Intel Pipe Clone Bit */
#define INTEL_HDMIB_CLONE_BIT 1
#define INTEL_HDMIC_CLONE_BIT 2
#define INTEL_HDMID_CLONE_BIT 3
#define INTEL_HDMIE_CLONE_BIT 4
#define INTEL_HDMIF_CLONE_BIT 5
#define INTEL_SDVO_NON_TV_CLONE_BIT 6
#define INTEL_SDVO_TV_CLONE_BIT 7
#define INTEL_SDVO_LVDS_CLONE_BIT 8
#define INTEL_ANALOG_CLONE_BIT 9
#define INTEL_TV_CLONE_BIT 10
#define INTEL_DP_B_CLONE_BIT 11
#define INTEL_DP_C_CLONE_BIT 12
#define INTEL_DP_D_CLONE_BIT 13
#define INTEL_LVDS_CLONE_BIT 14
#define INTEL_DVO_TMDS_CLONE_BIT 15
#define INTEL_DVO_LVDS_CLONE_BIT 16
#define INTEL_EDP_CLONE_BIT 17
 
#define INTEL_DVO_CHIP_NONE 0
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
191,6 → 173,11
int crtc_mask;
};
 
struct intel_panel {
struct drm_display_mode *fixed_mode;
int fitting_mode;
};
 
struct intel_connector {
struct drm_connector base;
/*
207,6 → 194,12
/* Reads out the current hw, returning true if the connector is enabled
* and active (i.e. dpms ON state). */
bool (*get_hw_state)(struct intel_connector *);
 
/* Panel info for eDP and LVDS */
struct intel_panel panel;
 
/* Cached EDID for eDP and LVDS. May hold ERR_PTR for invalid EDID. */
struct edid *edid;
};
 
struct intel_crtc {
213,6 → 206,7
struct drm_crtc base;
enum pipe pipe;
enum plane plane;
enum transcoder cpu_transcoder;
u8 lut_r[256], lut_g[256], lut_b[256];
/*
* Whether the crtc and the connected output pipeline is active. Implies
226,6 → 220,8
struct intel_unpin_work *unpin_work;
int fdi_lanes;
 
atomic_t unpin_work_count;
 
/* Display surface base address adjustment for pageflips. Note that on
* gen4+ this only adjusts up to a tile, offsets within a tile are
* handled in the hw itself (with the TILEOFF register). */
240,6 → 236,7
 
/* We can share PLLs across outputs if the timings match */
struct intel_pch_pll *pch_pll;
uint32_t ddi_pll_sel;
};
 
struct intel_plane {
246,6 → 243,7
struct drm_plane base;
enum pipe pipe;
struct drm_i915_gem_object *obj;
bool can_scale;
int max_downscale;
u32 lut_r[1024], lut_g[1024], lut_b[1024];
void (*update_plane)(struct drm_plane *plane,
345,10 → 343,8
} __attribute__((packed));
 
struct intel_hdmi {
struct intel_encoder base;
u32 sdvox_reg;
int ddc_bus;
int ddi_port;
uint32_t color_range;
bool has_hdmi_sink;
bool has_audio;
359,18 → 355,15
struct drm_display_mode *adjusted_mode);
};
 
#define DP_RECEIVER_CAP_SIZE 0xf
#define DP_MAX_DOWNSTREAM_PORTS 0x10
#define DP_LINK_CONFIGURATION_SIZE 9
 
struct intel_dp {
struct intel_encoder base;
uint32_t output_reg;
uint32_t DP;
uint8_t link_configuration[DP_LINK_CONFIGURATION_SIZE];
bool has_audio;
enum hdmi_force_audio force_audio;
enum port port;
uint32_t color_range;
uint8_t link_bw;
uint8_t lane_count;
385,13 → 378,18
int panel_power_cycle_delay;
int backlight_on_delay;
int backlight_off_delay;
struct drm_display_mode *panel_fixed_mode; /* for eDP */
struct delayed_work panel_vdd_work;
bool want_panel_vdd;
struct edid *edid; /* cached EDID for eDP */
int edid_mode_count;
struct intel_connector *attached_connector;
};
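 
/* A single physical digital (DDI) port can drive either DP or HDMI,
* so both encoder states are embedded in one intel_digital_port. */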
 
struct intel_digital_port {
struct intel_encoder base;
enum port port;
struct intel_dp dp;
struct intel_hdmi hdmi;
};
 
static inline struct drm_crtc *
intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
{
408,11 → 406,14
 
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
struct drm_crtc *crtc;
struct drm_i915_gem_object *old_fb_obj;
struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
atomic_t pending;
#define INTEL_FLIP_INACTIVE 0
#define INTEL_FLIP_PENDING 1
#define INTEL_FLIP_COMPLETE 2
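/* pending tracks the flip's lifecycle: it is set to PENDING when the
* flip is queued and advanced to COMPLETE from the flip interrupt
* path. */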
bool enable_stall_check;
};
 
423,6 → 424,8
int interval;
};
 
int intel_pch_rawclk(struct drm_device *dev);
 
int intel_connector_update_modes(struct drm_connector *connector,
struct edid *edid);
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
433,7 → 436,12
extern void intel_crt_init(struct drm_device *dev);
extern void intel_hdmi_init(struct drm_device *dev,
int sdvox_reg, enum port port);
extern void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
extern struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder);
extern bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
bool is_sdvob);
446,10 → 454,27
extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
enum port port);
extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
extern bool intel_dp_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void ironlake_edp_backlight_on(struct intel_dp *intel_dp);
extern void ironlake_edp_backlight_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_off(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
extern void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern int intel_edp_target_clock(struct intel_encoder *,
struct drm_display_mode *mode);
459,6 → 484,10
enum plane plane);
 
/* intel_panel.c */
extern int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode);
extern void intel_panel_fini(struct intel_panel *panel);
 
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
extern void intel_pch_panel_fitting(struct drm_device *dev,
467,7 → 496,7
struct drm_display_mode *adjusted_mode);
extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
extern int intel_panel_setup_backlight(struct drm_device *dev);
extern int intel_panel_setup_backlight(struct drm_connector *connector);
extern void intel_panel_enable_backlight(struct drm_device *dev,
enum pipe pipe);
extern void intel_panel_disable_backlight(struct drm_device *dev);
501,6 → 530,31
return to_intel_connector(connector)->encoder;
}
 
static inline struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port =
container_of(encoder, struct intel_digital_port, base.base);
return &intel_dig_port->dp;
}
 
static inline struct intel_digital_port *
enc_to_dig_port(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_digital_port, base.base);
}
 
static inline struct intel_digital_port *
dp_to_dig_port(struct intel_dp *intel_dp)
{
return container_of(intel_dp, struct intel_digital_port, dp);
}
 
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
return container_of(intel_hdmi, struct intel_digital_port, hdmi);
}
 
extern void intel_connector_attach_encoder(struct intel_connector *connector,
struct intel_encoder *encoder);
extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
509,8 → 563,12
struct drm_crtc *crtc);
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern enum transcoder
intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
enum pipe pipe);
extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
extern int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
 
struct intel_load_detect_pipe {
struct drm_framebuffer *release_fb;
578,6 → 636,10
extern void intel_update_linetime_watermarks(struct drm_device *dev, int pipe,
struct drm_display_mode *mode);
 
extern unsigned long intel_gen4_compute_offset_xtiled(int *x, int *y,
unsigned int bpp,
unsigned int pitch);
 
extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
601,12 → 663,22
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);
extern void ironlake_teardown_rc6(struct drm_device *dev);
 
extern void intel_enable_ddi(struct intel_encoder *encoder);
extern void intel_disable_ddi(struct intel_encoder *encoder);
extern bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
enum pipe *pipe);
extern void intel_ddi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
extern int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv);
extern void intel_ddi_pll_init(struct drm_device *dev);
extern void intel_ddi_enable_pipe_func(struct drm_crtc *crtc);
extern void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder);
extern void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc);
extern void intel_ddi_setup_hw_pll_state(struct drm_device *dev);
extern bool intel_ddi_pll_mode_set(struct drm_crtc *crtc, int clock);
extern void intel_ddi_put_crtc_pll(struct drm_crtc *crtc);
extern void intel_ddi_set_pipe_settings(struct drm_crtc *crtc);
extern void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder);
extern bool
intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector);
extern void intel_ddi_fdi_disable(struct drm_crtc *crtc);
 
#endif /* __INTEL_DRV_H__ */
/drivers/video/drm/i915/intel_hdmi.c
36,10 → 36,15
#include <drm/i915_drm.h>
#include "i915_drv.h"
 
static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
{
return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
}
 
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t enabled_bits;
 
51,13 → 56,14
 
struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_hdmi, base.base);
struct intel_digital_port *intel_dig_port =
container_of(encoder, struct intel_digital_port, base.base);
return &intel_dig_port->hdmi;
}
 
static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_hdmi, base);
return enc_to_intel_hdmi(&intel_attached_encoder(connector)->base);
}
 
void intel_dip_infoframe_csum(struct dip_infoframe *frame)
334,6 → 340,8
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
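 
/* Advertise the CEA-861 Video Identification Code for this timing so
* HDMI sinks can identify the mode. */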
 
avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
 
intel_set_infoframe(encoder, &avi_if);
}
 
754,7 → 762,7
return MODE_OK;
}
 
static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
763,7 → 771,7
 
static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi->base.base.dev;
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t bit;
 
786,6 → 794,9
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_i915_private *dev_priv = connector->dev->dev_private;
struct edid *edid;
enum drm_connector_status status = connector_status_disconnected;
814,6 → 825,7
if (intel_hdmi->force_audio != HDMI_AUDIO_AUTO)
intel_hdmi->has_audio =
(intel_hdmi->force_audio == HDMI_AUDIO_ON);
intel_encoder->type = INTEL_OUTPUT_HDMI;
}
 
return status;
859,10 → 871,12
uint64_t val)
{
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
struct intel_digital_port *intel_dig_port =
hdmi_to_dig_port(intel_hdmi);
struct drm_i915_private *dev_priv = connector->dev->dev_private;
int ret;
 
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
#if 0
899,8 → 913,8
return -EINVAL;
 
done:
if (intel_hdmi->base.base.crtc) {
struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
if (intel_dig_port->base.base.crtc) {
struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
intel_set_mode(crtc, &crtc->mode,
crtc->x, crtc->y, crtc->fb);
}
915,12 → 929,6
kfree(connector);
}
 
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs_hsw = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_ddi_mode_set,
.disable = intel_encoder_noop,
};
 
static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
.mode_fixup = intel_hdmi_mode_fixup,
.mode_set = intel_hdmi_mode_set,
952,43 → 960,24
intel_attach_broadcast_rgb_property(connector);
}
 
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
struct intel_connector *intel_connector)
{
struct drm_connector *connector = &intel_connector->base;
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct intel_encoder *intel_encoder = &intel_dig_port->base;
struct drm_device *dev = intel_encoder->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_connector *connector;
struct intel_encoder *intel_encoder;
struct intel_connector *intel_connector;
struct intel_hdmi *intel_hdmi;
enum port port = intel_dig_port->port;
 
intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
if (!intel_hdmi)
return;
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_hdmi);
return;
}
 
intel_encoder = &intel_hdmi->base;
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
 
connector = &intel_connector->base;
drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA);
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
 
intel_encoder->type = INTEL_OUTPUT_HDMI;
 
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
intel_encoder->cloneable = false;
 
intel_hdmi->ddi_port = port;
switch (port) {
case PORT_B:
intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
1008,8 → 997,6
BUG();
}
 
intel_hdmi->sdvox_reg = sdvox_reg;
 
if (!HAS_PCH_SPLIT(dev)) {
intel_hdmi->write_infoframe = g4x_write_infoframe;
intel_hdmi->set_infoframes = g4x_set_infoframes;
1027,22 → 1014,11
intel_hdmi->set_infoframes = cpt_set_infoframes;
}
 
if (IS_HASWELL(dev)) {
intel_encoder->enable = intel_enable_ddi;
intel_encoder->disable = intel_disable_ddi;
intel_encoder->get_hw_state = intel_ddi_get_hw_state;
drm_encoder_helper_add(&intel_encoder->base,
&intel_hdmi_helper_funcs_hsw);
} else {
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
drm_encoder_helper_add(&intel_encoder->base,
&intel_hdmi_helper_funcs);
}
if (IS_HASWELL(dev))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
 
 
intel_hdmi_add_properties(intel_hdmi, connector);
 
intel_connector_attach_encoder(intel_connector, intel_encoder);
1057,3 → 1033,42
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
}
 
void intel_hdmi_init(struct drm_device *dev, int sdvox_reg, enum port port)
{
struct intel_digital_port *intel_dig_port;
struct intel_encoder *intel_encoder;
struct drm_encoder *encoder;
struct intel_connector *intel_connector;
 
intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
if (!intel_dig_port)
return;
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_dig_port);
return;
}
 
intel_encoder = &intel_dig_port->base;
encoder = &intel_encoder->base;
 
drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
 
intel_encoder->enable = intel_enable_hdmi;
intel_encoder->disable = intel_disable_hdmi;
intel_encoder->get_hw_state = intel_hdmi_get_hw_state;
 
intel_encoder->type = INTEL_OUTPUT_HDMI;
intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
intel_encoder->cloneable = false;
 
intel_dig_port->port = port;
intel_dig_port->hdmi.sdvox_reg = sdvox_reg;
intel_dig_port->dp.output_reg = 0;
 
intel_hdmi_init_connector(intel_dig_port, intel_connector);
}
/drivers/video/drm/i915/intel_i2c.c
1,4 → 1,4
/*
/*
* Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
* Copyright © 2006-2008,2010 Intel Corporation
* Jesse Barnes <jesse.barnes@intel.com>
432,7 → 432,7
I915_WRITE(GMBUS0 + reg_offset, 0);
 
/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
bus->force_bit = true;
bus->force_bit = 1;
ret = i2c_bit_algo.master_xfer(adapter, msgs, num);
 
out:
491,10 → 491,13
 
/* gmbus seems to be broken on i830 */
if (IS_I830(dev))
bus->force_bit = true;
bus->force_bit = 1;
 
intel_gpio_setup(bus, port);
 
ret = i2c_add_adapter(&bus->adapter);
if (ret)
goto err;
}
 
intel_i2c_reset(dev_priv->dev);
502,10 → 505,10
return 0;
 
err:
// while (--i) {
// struct intel_gmbus *bus = &dev_priv->gmbus[i];
// i2c_del_adapter(&bus->adapter);
// }
while (--i) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
i2c_del_adapter(&bus->adapter);
}
return ret;
}
 
529,7 → 532,10
{
struct intel_gmbus *bus = to_intel_gmbus(adapter);
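 
/* force_bit acts as a reference count: each enable increments it and
* each disable decrements it, so GMBUS is only used again once every
* user of bit-banging is gone. */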
 
bus->force_bit = force_bit;
bus->force_bit += force_bit ? 1 : -1;
DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
force_bit ? "en" : "dis", adapter->name,
bus->force_bit);
}
 
void intel_teardown_gmbus(struct drm_device *dev)
539,6 → 545,6
 
for (i = 0; i < GMBUS_NUM_PORTS; i++) {
struct intel_gmbus *bus = &dev_priv->gmbus[i];
// i2c_del_adapter(&bus->adapter);
i2c_del_adapter(&bus->adapter);
}
}
/drivers/video/drm/i915/intel_lvds.c
40,28 → 40,30
//#include <linux/acpi.h>
 
/* Private structure for the integrated LVDS support */
struct intel_lvds {
struct intel_lvds_connector {
struct intel_connector base;
 
// struct notifier_block lid_notifier;
};
 
struct intel_lvds_encoder {
struct intel_encoder base;
 
struct edid *edid;
 
int fitting_mode;
u32 pfit_control;
u32 pfit_pgm_ratios;
bool pfit_dirty;
 
struct drm_display_mode *fixed_mode;
struct intel_lvds_connector *attached_connector;
};
 
static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
static struct intel_lvds_encoder *to_lvds_encoder(struct drm_encoder *encoder)
{
return container_of(encoder, struct intel_lvds, base.base);
return container_of(encoder, struct intel_lvds_encoder, base.base);
}
 
static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
static struct intel_lvds_connector *to_lvds_connector(struct drm_connector *connector)
{
return container_of(intel_attached_encoder(connector),
struct intel_lvds, base);
return container_of(connector, struct intel_lvds_connector, base.base);
}
 
static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
96,7 → 98,7
static void intel_enable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
113,7 → 115,7
 
I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
 
if (intel_lvds->pfit_dirty) {
if (lvds_encoder->pfit_dirty) {
/*
* Enable automatic panel scaling so that non-native modes
* fill the screen. The panel fitter should only be
121,12 → 123,12
* register description and PRM.
*/
DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
intel_lvds->pfit_control,
intel_lvds->pfit_pgm_ratios);
lvds_encoder->pfit_control,
lvds_encoder->pfit_pgm_ratios);
 
I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
intel_lvds->pfit_dirty = false;
I915_WRITE(PFIT_PGM_RATIOS, lvds_encoder->pfit_pgm_ratios);
I915_WRITE(PFIT_CONTROL, lvds_encoder->pfit_control);
lvds_encoder->pfit_dirty = false;
}
 
I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
140,7 → 142,7
static void intel_disable_lvds(struct intel_encoder *encoder)
{
struct drm_device *dev = encoder->base.dev;
struct intel_lvds *intel_lvds = to_intel_lvds(&encoder->base);
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 ctl_reg, lvds_reg, stat_reg;
 
160,9 → 162,9
if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000))
DRM_ERROR("timed out waiting for panel to power off\n");
 
if (intel_lvds->pfit_control) {
if (lvds_encoder->pfit_control) {
I915_WRITE(PFIT_CONTROL, 0);
intel_lvds->pfit_dirty = true;
lvds_encoder->pfit_dirty = true;
}
 
I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
172,8 → 174,8
static int intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
249,8 → 251,10
{
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
struct intel_crtc *intel_crtc = intel_lvds->base.new_crtc;
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(encoder);
struct intel_connector *intel_connector =
&lvds_encoder->attached_connector->base;
struct intel_crtc *intel_crtc = lvds_encoder->base.new_crtc;
u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;
int pipe;
 
260,7 → 264,7
return false;
}
 
if (intel_encoder_check_is_cloned(&intel_lvds->base))
if (intel_encoder_check_is_cloned(&lvds_encoder->base))
return false;
 
/*
269,10 → 273,12
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
 
if (HAS_PCH_SPLIT(dev)) {
intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
intel_pch_panel_fitting(dev,
intel_connector->panel.fitting_mode,
mode, adjusted_mode);
return true;
}
298,7 → 304,7
 
drm_mode_set_crtcinfo(adjusted_mode, 0);
 
switch (intel_lvds->fitting_mode) {
switch (intel_connector->panel.fitting_mode) {
case DRM_MODE_SCALE_CENTER:
/*
* For centered modes, we have to calculate border widths &
396,11 → 402,11
if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 
if (pfit_control != intel_lvds->pfit_control ||
pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
intel_lvds->pfit_control = pfit_control;
intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
intel_lvds->pfit_dirty = true;
if (pfit_control != lvds_encoder->pfit_control ||
pfit_pgm_ratios != lvds_encoder->pfit_pgm_ratios) {
lvds_encoder->pfit_control = pfit_control;
lvds_encoder->pfit_pgm_ratios = pfit_pgm_ratios;
lvds_encoder->pfit_dirty = true;
}
dev_priv->lvds_border_bits = border;
 
449,14 → 455,15
*/
static int intel_lvds_get_modes(struct drm_connector *connector)
{
struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct intel_lvds_connector *lvds_connector = to_lvds_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
 
if (intel_lvds->edid)
return drm_add_edid_modes(connector, intel_lvds->edid);
/* use cached edid if we have one */
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
return drm_add_edid_modes(connector, lvds_connector->base.edid);
 
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
mode = drm_mode_duplicate(dev, lvds_connector->base.panel.fixed_mode);
if (mode == NULL)
return 0;
 
497,10 → 504,11
static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
void *unused)
{
struct drm_i915_private *dev_priv =
container_of(nb, struct drm_i915_private, lid_notifier);
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector = dev_priv->int_lvds_connector;
struct intel_lvds_connector *lvds_connector =
container_of(nb, struct intel_lvds_connector, lid_notifier);
struct drm_connector *connector = &lvds_connector->base.base;
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
509,9 → 517,7
* check and update the status of LVDS connector after receiving
* the LID notification event.
*/
if (connector)
connector->status = connector->funcs->detect(connector,
false);
connector->status = connector->funcs->detect(connector, false);
 
/* Don't force modeset on machines where it causes a GPU lockup */
if (dmi_check_system(intel_no_modeset_on_lid))
527,7 → 533,7
dev_priv->modeset_on_lid = 0;
 
mutex_lock(&dev->mode_config.mutex);
intel_modeset_check_state(dev);
intel_modeset_setup_hw_state(dev, true);
mutex_unlock(&dev->mode_config.mutex);
 
return NOTIFY_OK;
543,13 → 549,16
*/
static void intel_lvds_destroy(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds_connector *lvds_connector =
to_lvds_connector(connector);
 
intel_panel_destroy_backlight(dev);
 
// if (dev_priv->lid_notifier.notifier_call)
// acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
if (!IS_ERR_OR_NULL(lvds_connector->base.edid))
kfree(lvds_connector->base.edid);
 
intel_panel_destroy_backlight(connector->dev);
intel_panel_fini(&lvds_connector->base.panel);
 
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
559,11 → 568,11
struct drm_property *property,
uint64_t value)
{
struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_device *dev = connector->dev;
 
if (property == dev->mode_config.scaling_mode_property) {
struct drm_crtc *crtc = intel_lvds->base.base.crtc;
struct drm_crtc *crtc;
 
if (value == DRM_MODE_SCALE_NONE) {
DRM_DEBUG_KMS("no scaling not supported\n");
570,11 → 579,13
return -EINVAL;
}
 
if (intel_lvds->fitting_mode == value) {
if (intel_connector->panel.fitting_mode == value) {
/* the LVDS scaling property is not changed */
return 0;
}
intel_lvds->fitting_mode = value;
intel_connector->panel.fitting_mode = value;
 
crtc = intel_attached_encoder(connector)->base.crtc;
if (crtc && crtc->enabled) {
/*
* If the CRTC is enabled, the display will be changed
765,14 → 776,6
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "ZOTAC ZBOXSD-ID12/ID13",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"),
DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Gigabyte GA-D525TUD",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
914,12 → 917,15
bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds;
struct intel_lvds_encoder *lvds_encoder;
struct intel_encoder *intel_encoder;
struct intel_lvds_connector *lvds_connector;
struct intel_connector *intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_display_mode *fixed_mode = NULL;
struct edid *edid;
struct drm_crtc *crtc;
u32 lvds;
int pipe;
947,23 → 953,25
}
}
 
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
lvds_encoder = kzalloc(sizeof(struct intel_lvds_encoder), GFP_KERNEL);
if (!lvds_encoder)
return false;
}
 
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_lvds);
lvds_connector = kzalloc(sizeof(struct intel_lvds_connector), GFP_KERNEL);
if (!lvds_connector) {
kfree(lvds_encoder);
return false;
}
 
lvds_encoder->attached_connector = lvds_connector;
 
if (!HAS_PCH_SPLIT(dev)) {
intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
lvds_encoder->pfit_control = I915_READ(PFIT_CONTROL);
}
 
intel_encoder = &intel_lvds->base;
intel_encoder = &lvds_encoder->base;
encoder = &intel_encoder->base;
intel_connector = &lvds_connector->base;
connector = &intel_connector->base;
drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
995,14 → 1003,10
 
/* create the scaling mode property */
drm_mode_create_scaling_mode_property(dev);
/*
* the initial panel fitting mode will be FULL_SCREEN.
*/
 
drm_connector_attach_property(&intel_connector->base,
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_ASPECT);
intel_lvds->fitting_mode = DRM_MODE_SCALE_ASPECT;
intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
/*
* LVDS discovery:
* 1) check for EDID on DDC
1017,20 → 1021,21
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
intel_lvds->edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
pin));
if (intel_lvds->edid) {
if (drm_add_edid_modes(connector,
intel_lvds->edid)) {
edid = drm_get_edid(connector, intel_gmbus_get_adapter(dev_priv, pin));
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
drm_mode_connector_update_edid_property(connector,
intel_lvds->edid);
edid);
} else {
kfree(intel_lvds->edid);
intel_lvds->edid = NULL;
kfree(edid);
edid = ERR_PTR(-EINVAL);
}
} else {
edid = ERR_PTR(-ENOENT);
}
if (!intel_lvds->edid) {
lvds_connector->base.edid = edid;
 
if (IS_ERR_OR_NULL(edid)) {
/* Didn't get an EDID, so
* set wide sync ranges so we get all modes
* handed to valid_mode for checking
1043,22 → 1048,26
 
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
intel_lvds->fixed_mode =
drm_mode_duplicate(dev, scan);
intel_find_lvds_downclock(dev,
intel_lvds->fixed_mode,
DRM_DEBUG_KMS("using preferred mode from EDID: ");
drm_mode_debug_printmodeline(scan);
 
fixed_mode = drm_mode_duplicate(dev, scan);
if (fixed_mode) {
intel_find_lvds_downclock(dev, fixed_mode,
connector);
goto out;
}
}
}
 
/* Failed to get EDID, what about VBT? */
if (dev_priv->lfp_lvds_vbt_mode) {
intel_lvds->fixed_mode =
drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (intel_lvds->fixed_mode) {
intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
DRM_DEBUG_KMS("using mode from VBT: ");
drm_mode_debug_printmodeline(dev_priv->lfp_lvds_vbt_mode);
 
fixed_mode = drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
if (fixed_mode) {
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
1078,16 → 1087,17
crtc = intel_get_crtc_for_pipe(dev, pipe);
 
if (crtc && (lvds & LVDS_PORT_EN)) {
intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
if (intel_lvds->fixed_mode) {
intel_lvds->fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
fixed_mode = intel_crtc_mode_get(dev, crtc);
if (fixed_mode) {
DRM_DEBUG_KMS("using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
goto out;
}
}
 
/* If we still don't have a mode after all that, give up. */
if (!intel_lvds->fixed_mode)
if (!fixed_mode)
goto failed;
 
out:
1107,11 → 1117,10
// DRM_DEBUG_KMS("lid notifier registration failed\n");
// dev_priv->lid_notifier.notifier_call = NULL;
// }
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
 
intel_panel_setup_backlight(dev);
intel_panel_init(&intel_connector->panel, fixed_mode);
intel_panel_setup_backlight(connector);
 
return true;
 
1119,7 → 1128,9
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
kfree(intel_connector);
if (fixed_mode)
drm_mode_destroy(dev, fixed_mode);
kfree(lvds_encoder);
kfree(lvds_connector);
return false;
}
/drivers/video/drm/i915/intel_modes.c
45,7 → 45,6
drm_mode_connector_update_edid_property(connector, edid);
ret = drm_add_edid_modes(connector, edid);
drm_edid_to_eld(connector, edid);
kfree(edid);
 
return ret;
}
61,12 → 60,16
struct i2c_adapter *adapter)
{
struct edid *edid;
int ret;
 
edid = drm_get_edid(connector, adapter);
if (!edid)
return 0;
 
return intel_connector_update_modes(connector, edid);
ret = intel_connector_update_modes(connector, edid);
kfree(edid);
 
return ret;
}
 
static const struct drm_prop_enum_list force_audio_names[] = {
94,7 → 97,7
 
dev_priv->force_audio_property = prop;
}
drm_connector_attach_property(connector, prop, 0);
drm_object_attach_property(&connector->base, prop, 0);
#endif
}
 
122,6 → 125,6
dev_priv->broadcast_rgb_property = prop;
}
 
drm_connector_attach_property(connector, prop, 0);
drm_object_attach_property(&connector->base, prop, 0);
#endif
}
/drivers/video/drm/i915/intel_panel.c
130,8 → 130,9
return 0;
}
 
static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
static u32 i915_read_blc_pwm_ctl(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 val;
 
/* Restore the CTL value if it was lost, e.g. after a GPU reset */
138,24 → 139,25
 
if (HAS_PCH_SPLIT(dev_priv->dev)) {
val = I915_READ(BLC_PWM_PCH_CTL2);
if (dev_priv->saveBLC_PWM_CTL2 == 0) {
dev_priv->saveBLC_PWM_CTL2 = val;
if (dev_priv->regfile.saveBLC_PWM_CTL2 == 0) {
dev_priv->regfile.saveBLC_PWM_CTL2 = val;
} else if (val == 0) {
I915_WRITE(BLC_PWM_PCH_CTL2,
dev_priv->saveBLC_PWM_CTL2);
val = dev_priv->saveBLC_PWM_CTL2;
val = dev_priv->regfile.saveBLC_PWM_CTL2;
I915_WRITE(BLC_PWM_PCH_CTL2, val);
}
} else {
val = I915_READ(BLC_PWM_CTL);
if (dev_priv->saveBLC_PWM_CTL == 0) {
dev_priv->saveBLC_PWM_CTL = val;
dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
if (dev_priv->regfile.saveBLC_PWM_CTL == 0) {
dev_priv->regfile.saveBLC_PWM_CTL = val;
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->regfile.saveBLC_PWM_CTL2 =
I915_READ(BLC_PWM_CTL2);
} else if (val == 0) {
I915_WRITE(BLC_PWM_CTL,
dev_priv->saveBLC_PWM_CTL);
val = dev_priv->regfile.saveBLC_PWM_CTL;
I915_WRITE(BLC_PWM_CTL, val);
if (INTEL_INFO(dev)->gen >= 4)
I915_WRITE(BLC_PWM_CTL2,
dev_priv->saveBLC_PWM_CTL2);
val = dev_priv->saveBLC_PWM_CTL;
dev_priv->regfile.saveBLC_PWM_CTL2);
}
}
 
164,10 → 166,9
 
static u32 _intel_panel_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
 
max = i915_read_blc_pwm_ctl(dev_priv);
max = i915_read_blc_pwm_ctl(dev);
 
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
374,27 → 375,24
enum drm_connector_status
intel_panel_detect(struct drm_device *dev)
{
#if 0
struct drm_i915_private *dev_priv = dev->dev_private;
#endif
 
if (i915_panel_ignore_lid)
return i915_panel_ignore_lid > 0 ?
connector_status_connected :
connector_status_disconnected;
 
/* opregion lid state on HP 2540p is wrong at boot up,
* appears to be either the BIOS or Linux ACPI fault */
#if 0
/* Assume that the BIOS does not lie through the OpRegion... */
if (dev_priv->opregion.lid_state)
if (!i915_panel_ignore_lid && dev_priv->opregion.lid_state) {
return ioread32(dev_priv->opregion.lid_state) & 0x1 ?
connector_status_connected :
connector_status_disconnected;
#endif
}
 
switch (i915_panel_ignore_lid) {
case -2:
return connector_status_connected;
case -1:
return connector_status_disconnected;
default:
return connector_status_unknown;
}
}
 
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
static int intel_panel_update_status(struct backlight_device *bd)
416,21 → 414,14
.get_brightness = intel_panel_get_brightness,
};
 
int intel_panel_setup_backlight(struct drm_device *dev)
int intel_panel_setup_backlight(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct backlight_properties props;
struct drm_connector *connector;
 
intel_panel_init_backlight(dev);
 
if (dev_priv->int_lvds_connector)
connector = dev_priv->int_lvds_connector;
else if (dev_priv->int_edp_connector)
connector = dev_priv->int_edp_connector;
else
return -ENODEV;
 
memset(&props, 0, sizeof(props));
props.type = BACKLIGHT_RAW;
props.max_brightness = _intel_panel_get_max_backlight(dev);
460,9 → 451,9
backlight_device_unregister(dev_priv->backlight);
}
#else
int intel_panel_setup_backlight(struct drm_device *dev)
int intel_panel_setup_backlight(struct drm_connector *connector)
{
intel_panel_init_backlight(dev);
intel_panel_init_backlight(connector->dev);
return 0;
}
 
471,3 → 462,20
return;
}
#endif
 
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode)
{
panel->fixed_mode = fixed_mode;
 
return 0;
}
 
void intel_panel_fini(struct intel_panel *panel)
{
struct intel_connector *intel_connector =
container_of(panel, struct intel_connector, panel);
 
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
}
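 
With the two helpers above, ownership of the fixed panel mode moves into struct intel_panel: the caller duplicates a probed mode, hands it to intel_panel_init(), and lets intel_panel_fini() destroy it on teardown, mirroring the intel_lvds.c changes earlier in this revision. A usage sketch under that assumption (the function name is hypothetical):
 
static int example_adopt_fixed_mode(struct drm_device *dev,
                                    struct intel_connector *intel_connector,
                                    const struct drm_display_mode *scan)
{
    struct drm_display_mode *fixed_mode;
 
    fixed_mode = drm_mode_duplicate(dev, scan); /* the panel will own this copy */
    if (!fixed_mode)
        return -ENOMEM;
 
    intel_panel_init(&intel_connector->panel, fixed_mode);
    /* ... later, on destroy, the mode is freed for us: */
    intel_panel_fini(&intel_connector->panel);
    return 0;
}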
/drivers/video/drm/i915/intel_pm.c
63,6 → 63,14
* i915.i915_enable_fbc parameter
*/
 
static bool intel_crtc_active(struct drm_crtc *crtc)
{
/* Be paranoid as we can arrive here with only partial
* state retrieved from the hardware during setup.
*/
return to_intel_crtc(crtc)->active && crtc->fb && crtc->mode.clock;
}
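 
intel_crtc_active() folds the scattered "crtc->enabled && crtc->fb" tests into one predicate that also checks the software active flag and a sane mode clock. A small sketch of the pattern it enables (the helper name is hypothetical):
 
static int example_count_active_pipes(struct drm_device *dev)
{
    struct drm_crtc *crtc;
    int count = 0;
 
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
        if (intel_crtc_active(crtc)) /* enabled, has an fb, clock != 0 */
            count++;
 
    return count;
}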
 
static void i8xx_disable_fbc(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
428,9 → 436,8
* - going to an unsupported config (interlace, pixel multiply, etc.)
*/
list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
if (tmp_crtc->enabled &&
!to_intel_crtc(tmp_crtc)->primary_disabled &&
tmp_crtc->fb) {
if (intel_crtc_active(tmp_crtc) &&
!to_intel_crtc(tmp_crtc)->primary_disabled) {
if (crtc) {
DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1015,7 → 1022,7
struct drm_crtc *crtc, *enabled = NULL;
 
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->enabled && crtc->fb) {
if (intel_crtc_active(crtc)) {
if (enabled)
return NULL;
enabled = crtc;
1109,9 → 1116,7
int entries, tlb_miss;
 
crtc = intel_get_crtc_for_plane(dev, plane);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
if (crtc->fb == NULL || !crtc->enabled || !intel_crtc->active) {
if (!intel_crtc_active(crtc)) {
*cursor_wm = cursor->guard_size;
*plane_wm = display->guard_size;
return false;
1240,7 → 1245,7
int entries;
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !crtc->enabled)
if (!intel_crtc_active(crtc))
return false;
 
clock = crtc->mode.clock; /* VESA DOT Clock */
1311,6 → 1316,7
struct drm_i915_private *dev_priv = dev->dev_private;
int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
int plane_sr, cursor_sr;
int ignore_plane_sr, ignore_cursor_sr;
unsigned int enabled = 0;
 
vlv_update_drain_latency(dev);
1327,17 → 1333,23
&planeb_wm, &cursorb_wm))
enabled |= 2;
 
plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&plane_sr, &cursor_sr))
&plane_sr, &ignore_cursor_sr) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
2*sr_latency_ns,
&valleyview_wm_info,
&valleyview_cursor_wm_info,
&ignore_plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
else
} else {
I915_WRITE(FW_BLC_SELF_VLV,
I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
plane_sr = cursor_sr = 0;
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
1350,10 → 1362,11
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
(I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
static void g4x_update_wm(struct drm_device *dev)
1376,17 → 1389,18
&planeb_wm, &cursorb_wm))
enabled |= 2;
 
plane_sr = cursor_sr = 0;
if (single_plane_enabled(enabled) &&
g4x_compute_srwm(dev, ffs(enabled) - 1,
sr_latency_ns,
&g4x_wm_info,
&g4x_cursor_wm_info,
&plane_sr, &cursor_sr))
&plane_sr, &cursor_sr)) {
I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
else
} else {
I915_WRITE(FW_BLC_SELF,
I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
plane_sr = cursor_sr = 0;
}
 
DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
planea_wm, cursora_wm,
1399,11 → 1413,11
(planeb_wm << DSPFW_PLANEB_SHIFT) |
planea_wm);
I915_WRITE(DSPFW2,
(I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
(I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
(cursora_wm << DSPFW_CURSORA_SHIFT));
/* HPLL off in SR has some issues on G4x... disable it */
I915_WRITE(DSPFW3,
(I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
(I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
(cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
 
1492,10 → 1506,13
 
fifo_size = dev_priv->display.get_fifo_size(dev, 0);
crtc = intel_get_crtc_for_plane(dev, 0);
if (crtc->enabled && crtc->fb) {
if (intel_crtc_active(crtc)) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
 
planea_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
wm_info, fifo_size, cpp,
latency_ns);
enabled = crtc;
} else
1503,10 → 1520,13
 
fifo_size = dev_priv->display.get_fifo_size(dev, 1);
crtc = intel_get_crtc_for_plane(dev, 1);
if (crtc->enabled && crtc->fb) {
if (intel_crtc_active(crtc)) {
int cpp = crtc->fb->bits_per_pixel / 8;
if (IS_GEN2(dev))
cpp = 4;
 
planeb_wm = intel_calculate_wm(crtc->mode.clock,
wm_info, fifo_size,
crtc->fb->bits_per_pixel / 8,
wm_info, fifo_size, cpp,
latency_ns);
if (enabled == NULL)
enabled = crtc;
1596,8 → 1616,7
 
planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
dev_priv->display.get_fifo_size(dev, 0),
crtc->fb->bits_per_pixel / 8,
latency_ns);
4, latency_ns);
fwater_lo = I915_READ(FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
 
1830,11 → 1849,113
enabled |= 2;
}
 
if ((dev_priv->num_pipe == 3) &&
g4x_compute_wm0(dev, 2,
/*
* Calculate and update the self-refresh watermark only when one
* display plane is used.
*
* SNB supports 3 levels of watermark.
*
* WM1/WM2/WM3 watermarks have to be enabled in ascending order,
* and disabled in descending order.
*
*/
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
if (!single_plane_enabled(enabled) ||
dev_priv->sprite_scaling_enabled)
return;
enabled = ffs(enabled) - 1;
 
/* WM1 */
if (!ironlake_compute_srwm(dev, 1, enabled,
SNB_READ_WM1_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM1_LP_ILK,
WM1_LP_SR_EN |
(SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM2 */
if (!ironlake_compute_srwm(dev, 2, enabled,
SNB_READ_WM2_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM2_LP_ILK,
WM2_LP_EN |
(SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM3 */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM3_LP_ILK,
WM3_LP_EN |
(SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
(fbc_wm << WM1_LP_FBC_SHIFT) |
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
}
 
static void ivybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
u32 val;
int fbc_wm, plane_wm, cursor_wm;
int ignore_fbc_wm, ignore_plane_wm, ignore_cursor_wm;
unsigned int enabled;
 
enabled = 0;
if (g4x_compute_wm0(dev, 0,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEA_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEA_ILK, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
" plane %d, " "cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 1;
}
 
if (g4x_compute_wm0(dev, 1,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEB_ILK);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEB_ILK, val |
((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
" plane %d, cursor: %d\n",
plane_wm, cursor_wm);
enabled |= 2;
}
 
if (g4x_compute_wm0(dev, 2,
&sandybridge_display_wm_info, latency,
&sandybridge_cursor_wm_info, latency,
&plane_wm, &cursor_wm)) {
val = I915_READ(WM0_PIPEC_IVB);
val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
I915_WRITE(WM0_PIPEC_IVB, val |
1894,12 → 2015,17
(plane_wm << WM1_LP_SR_SHIFT) |
cursor_wm);
 
/* WM3 */
/* WM3, note we have to correct the cursor latency */
if (!ironlake_compute_srwm(dev, 3, enabled,
SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&fbc_wm, &plane_wm, &cursor_wm))
&fbc_wm, &plane_wm, &ignore_cursor_wm) ||
!ironlake_compute_srwm(dev, 3, enabled,
2 * SNB_READ_WM3_LATENCY() * 500,
&sandybridge_display_srwm_info,
&sandybridge_cursor_srwm_info,
&ignore_fbc_wm, &ignore_plane_wm, &cursor_wm))
return;
 
I915_WRITE(WM3_LP_ILK,
1948,7 → 2074,7
int entries, tlb_miss;
 
crtc = intel_get_crtc_for_plane(dev, plane);
if (crtc->fb == NULL || !crtc->enabled) {
if (!intel_crtc_active(crtc)) {
*sprite_wm = display->guard_size;
return false;
}
2348,7 → 2474,7
struct drm_i915_private *dev_priv = dev->dev_private;
u32 limits = gen6_rps_limits(dev_priv, &val);
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
WARN_ON(val > dev_priv->rps.max_delay);
WARN_ON(val < dev_priv->rps.min_delay);
 
2423,12 → 2549,12
struct intel_ring_buffer *ring;
u32 rp_state_cap;
u32 gt_perf_status;
u32 pcu_mbox, rc6_mask = 0;
u32 rc6vids, pcu_mbox, rc6_mask = 0;
u32 gtfifodbg;
int rc6_mode;
int i;
int i, ret;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
/* Here begins a magic sequence of register writes to enable
* auto-downclocking.
2522,31 → 2648,17
GEN6_RP_UP_BUSY_AVG |
(IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
 
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
 
I915_WRITE(GEN6_PCODE_DATA, 0);
I915_WRITE(GEN6_PCODE_MAILBOX,
GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
 
/* Check for overclock support */
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
pcu_mbox = I915_READ(GEN6_PCODE_DATA);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500))
DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
if (pcu_mbox & (1<<31)) { /* OC supported */
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
if (!ret) {
pcu_mbox = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
if (ret && pcu_mbox & (1<<31)) { /* OC supported */
dev_priv->rps.max_delay = pcu_mbox & 0xff;
DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
}
} else {
DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
}
 
gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
 
2559,6 → 2671,20
/* enable all PM interrupts */
I915_WRITE(GEN6_PMINTRMSK, 0);
 
rc6vids = 0;
ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
if (IS_GEN6(dev) && ret) {
DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
} else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
rc6vids &= 0xffff00;
rc6vids |= GEN6_ENCODE_RC6_VID(450);
ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
if (ret)
DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
}
 
gen6_gt_force_wake_put(dev_priv);
}
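 
For reference, the RC6 VID check above relies on a linear VID-to-voltage mapping; assuming the usual gen6 definitions of this era (the EXAMPLE_ macros below are illustrative stand-ins, not names from this diff), a VID of 41 decodes to exactly the 450mV minimum the workaround enforces:
 
#define EXAMPLE_ENCODE_RC6_VID(mv)   (((mv) - 245) / 5)  /* 450mV -> 41 */
#define EXAMPLE_DECODE_RC6_VID(vid)  (((vid) * 5) + 245) /* 41 -> 450mV */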
 
2567,10 → 2693,11
{
struct drm_i915_private *dev_priv = dev->dev_private;
int min_freq = 15;
int gpu_freq, ia_freq, max_ia_freq;
int gpu_freq;
unsigned int ia_freq, max_ia_freq;
int scaling_factor = 180;
 
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
max_ia_freq = cpufreq_quick_get_max(0);
/*
2601,19 → 2728,13
else
ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
 
I915_WRITE(GEN6_PCODE_DATA,
(ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
gpu_freq);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
GEN6_PCODE_READY) == 0, 10)) {
DRM_ERROR("pcode write of freq table timed out\n");
continue;
sandybridge_pcode_write(dev_priv,
GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
ia_freq | gpu_freq);
}
}
}
#endif
 
void ironlake_teardown_rc6(struct drm_device *dev)
2620,16 → 2741,16
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->renderctx) {
i915_gem_object_unpin(dev_priv->renderctx);
drm_gem_object_unreference(&dev_priv->renderctx->base);
dev_priv->renderctx = NULL;
if (dev_priv->ips.renderctx) {
i915_gem_object_unpin(dev_priv->ips.renderctx);
drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
dev_priv->ips.renderctx = NULL;
}
 
if (dev_priv->pwrctx) {
i915_gem_object_unpin(dev_priv->pwrctx);
drm_gem_object_unreference(&dev_priv->pwrctx->base);
dev_priv->pwrctx = NULL;
if (dev_priv->ips.pwrctx) {
i915_gem_object_unpin(dev_priv->ips.pwrctx);
drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
dev_priv->ips.pwrctx = NULL;
}
}
 
2655,14 → 2776,14
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (!dev_priv->renderctx)
if (dev_priv->ips.renderctx == NULL)
dev_priv->ips.renderctx = intel_alloc_context_page(dev);
if (!dev_priv->ips.renderctx)
return -ENOMEM;
 
if (dev_priv->pwrctx == NULL)
dev_priv->pwrctx = intel_alloc_context_page(dev);
if (!dev_priv->pwrctx) {
if (dev_priv->ips.pwrctx == NULL)
dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
if (!dev_priv->ips.pwrctx) {
ironlake_teardown_rc6(dev);
return -ENOMEM;
}
2674,6 → 2795,7
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
bool was_interruptible;
int ret;
 
/* rc6 disabled by default due to repeated reports of hanging during
2688,6 → 2810,9
if (ret)
return;
 
was_interruptible = dev_priv->mm.interruptible;
dev_priv->mm.interruptible = false;
 
/*
* GPU can automatically power down the render unit if given a page
* to save state.
2695,12 → 2820,13
ret = intel_ring_begin(ring, 6);
if (ret) {
ironlake_teardown_rc6(dev);
dev_priv->mm.interruptible = was_interruptible;
return;
}
 
intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
intel_ring_emit(ring, MI_SET_CONTEXT);
intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
intel_ring_emit(ring, dev_priv->ips.renderctx->gtt_offset |
MI_MM_SPACE_GTT |
MI_SAVE_EXT_STATE_EN |
MI_RESTORE_EXT_STATE_EN |
2715,7 → 2841,8
* does an implicit flush, combined with MI_FLUSH above, it should be
* safe to assume that renderctx is valid
*/
ret = intel_wait_ring_idle(ring);
ret = intel_ring_idle(ring);
dev_priv->mm.interruptible = was_interruptible;
if (ret) {
DRM_ERROR("failed to enable ironlake power power savings\n");
ironlake_teardown_rc6(dev);
2722,7 → 2849,7
return;
}
 
I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(PWRCTXA, dev_priv->ips.pwrctx->gtt_offset | PWRCTX_EN);
I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
}
 
3331,6 → 3458,8
 
void intel_disable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_IRONLAKE_M(dev)) {
ironlake_disable_drps(dev);
ironlake_disable_rc6(dev);
3341,27 → 3470,44
 
void intel_enable_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
// gen6_enable_rps(dev);
// gen6_update_ring_freq(dev);
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
* to make resume and init faster.
*/
// schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
// round_jiffies_up_relative(HZ));
}
}
 
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
 
static void ironlake_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
/* Required for FBC */
dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
DPFCRUNIT_CLOCK_GATE_DISABLE |
DPFDUNIT_CLOCK_GATE_DISABLE;
/* Required for CxSR */
dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
 
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
3369,8 → 3515,6
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
/*
* According to the spec the following bits should be set in
* order to enable memory self-refresh
3381,9 → 3525,7
I915_WRITE(ILK_DISPLAY_CHICKEN2,
(I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL));
I915_WRITE(ILK_DSPCLK_GATE,
(I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE));
dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
3405,13 → 3547,10
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPFC_DIS1 |
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
 
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
3418,20 → 3557,60
I915_WRITE(_3D_CHICKEN2,
_3D_CHICKEN2_WM_READ_PIPELINED << 16 |
_3D_CHICKEN2_WM_READ_PIPELINED);
 
/* WaDisableRenderCachePipelinedFlush */
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
 
ibx_init_clock_gating(dev);
}
 
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* The below fixes a weird display corruption (a few pixels shifted
* downward) seen only on the LVDS of some HP laptops with IVY.
*/
for_each_pipe(pipe)
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_CHICKEN2_TIMING_OVERRIDE);
/* WADP0ClockGatingDisable */
for_each_pipe(pipe) {
I915_WRITE(TRANS_CHICKEN1(pipe),
TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
}
 
static void gen6_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_ELPIN_409_SELECT);
 
/* WaDisableHiZPlanesWhenMSAAEnabled */
I915_WRITE(_3D_CHICKEN,
_MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
 
/* WaSetupGtModeTdRowDispatch */
if (IS_SNB_GT1(dev))
I915_WRITE(GEN6_GT_MODE,
_MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
3481,11 → 3660,12
I915_WRITE(ILK_DISPLAY_CHICKEN2,
I915_READ(ILK_DISPLAY_CHICKEN2) |
ILK_DPARB_GATE | ILK_VSDPFD_FULL);
I915_WRITE(ILK_DSPCLK_GATE,
I915_READ(ILK_DSPCLK_GATE) |
ILK_DPARB_CLK_GATE |
ILK_DPFD_CLK_GATE);
I915_WRITE(ILK_DSPCLK_GATE_D,
I915_READ(ILK_DSPCLK_GATE_D) |
ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
 
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
3500,6 → 3680,8
* platforms I checked have a 0 for this. (Maybe BIOS overrides?) */
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_DISABLE(0xffff));
I915_WRITE(GEN6_GT_MODE, _MASKED_BIT_ENABLE(GEN6_GT_MODE_HI));
 
cpt_init_clock_gating(dev);
}
 
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3514,14 → 3696,25
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
 
static void lpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* TODO: this bit should only be enabled when really needed, then
* disabled when not needed anymore in order to save power.
*/
if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
I915_WRITE(SOUTH_DSPCLK_GATE_D,
I915_READ(SOUTH_DSPCLK_GATE_D) |
PCH_LP_PARTITION_LEVEL_DISABLE);
}
 
static void haswell_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
3531,12 → 3724,6
*/
I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
 
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
 
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3565,6 → 3752,10
I915_WRITE(CACHE_MODE_1,
_MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
 
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
/* XXX: This is a workaround for early silicon revisions and should be
* removed later.
*/
3574,6 → 3765,7
WM_DBG_DISALLOW_SPRITE |
WM_DBG_DISALLOW_MAXFIFO);
 
lpt_init_clock_gating(dev);
}
 
static void ivybridge_init_clock_gating(struct drm_device *dev)
3580,21 → 3772,31
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
uint32_t snpcr;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableEarlyCull */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
/* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
/* WaDisablePSDDualDispatchEnable */
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
else
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1_GT2,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3604,7 → 3806,18
GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
GEN7_WA_L3_CHICKEN_MODE);
if (IS_IVB_GT1(dev))
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
else
I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
 
/* WaForceL3Serialization */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
* gating disable must be set. Failure to set it results in
* flickering pixels due to Z write ordering failures after
3634,6 → 3847,7
intel_flush_display_plane(dev_priv, pipe);
}
 
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
3647,6 → 3861,8
snpcr &= ~GEN6_MBC_SNPCR_MASK;
snpcr |= GEN6_MBC_SNPCR_MED;
I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
 
cpt_init_clock_gating(dev);
}
 
static void valleyview_init_clock_gating(struct drm_device *dev)
3653,33 → 3869,51
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
 
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
 
I915_WRITE(WM3_LP_ILK, 0);
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
 
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
 
/* WaDisableEarlyCull */
I915_WRITE(_3D_CHICKEN3,
_MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
 
/* WaDisableBackToBackFlipFix */
I915_WRITE(IVB_CHICKEN3,
CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
CHICKEN3_DGMG_DONE_FIX_DISABLE);
 
I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
_MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
 
/* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
 
/* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
I915_WRITE(GEN7_L3CNTLREG1, I915_READ(GEN7_L3CNTLREG1) | GEN7_L3AGDIS);
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
 
/* WaForceL3Serialization */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
/* WaDisableDopClockGating */
I915_WRITE(GEN7_ROW_CHICKEN2,
_MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
 
/* WaForceL3Serialization */
I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
~L3SQ_URB_READ_CAM_MATCH_DISABLE);
 
/* This is required by WaCatErrorRejectionIssue */
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
 
/* WaMbcDriverBootEnable */
I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
GEN6_MBCTL_ENABLE_BOOT_FETCH);
 
3731,6 → 3965,13
PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
PLANEA_FLIPDONE_INT_EN);
 
/*
* WaDisableVLVClockGating_VBIIssue
* Disable clock gating on the GCFG unit to prevent a delay
* in the reporting of vblank events.
*/
I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
 
static void g4x_init_clock_gating(struct drm_device *dev)
3749,6 → 3990,10
if (IS_GM45(dev))
dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
 
/* WaDisableRenderCachePipelinedFlush */
I915_WRITE(CACHE_MODE_0,
_MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
}
 
static void crestline_init_clock_gating(struct drm_device *dev)
3804,44 → 4049,11
I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
}
 
static void ibx_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
}
 
static void cpt_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int pipe;
 
/*
* On Ibex Peak and Cougar Point, we need to disable clock
* gating for the panel power sequencer or it will fail to
* start up when no ports are active.
*/
I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
DPLS_EDP_PPS_FIX_DIS);
/* Without this, mode sets may fail silently on FDI */
for_each_pipe(pipe)
I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
}
 
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
dev_priv->display.init_clock_gating(dev);
 
if (dev_priv->display.init_pch_clock_gating)
dev_priv->display.init_pch_clock_gating(dev);
}
 
/* Starting with Haswell, we have different power wells for
3867,7 → 4079,7
 
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
if (wait_for(I915_READ(power_wells[i] & HSW_PWR_WELL_STATE), 20))
if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
}
}
3905,11 → 4117,6
 
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
 
if (IS_GEN5(dev)) {
if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
dev_priv->display.update_wm = ironlake_update_wm;
3932,7 → 4139,7
} else if (IS_IVYBRIDGE(dev)) {
/* FIXME: detect B0+ stepping and use auto training */
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
dev_priv->display.update_wm = ivybridge_update_wm;
dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
4020,6 → 4227,12
DRM_ERROR("GT thread status wait timed out\n");
}
 
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}
 
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
4033,7 → 4246,7
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE, 1);
I915_WRITE_NOTRACE(FORCEWAKE, FORCEWAKE_KERNEL);
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
 
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
4043,6 → 4256,13
__gen6_gt_wait_for_thread_c0(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);
}
 
static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
u32 forcewake_ack;
4056,8 → 4276,9
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);
 
if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
4094,14 → 4315,16
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
/* something from same cacheline, but !FORCEWAKE */
POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
 
static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
/* gen6_gt_check_fifodbg doubles as the POSTING_READ */
I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_MT */
POSTING_READ(ECOBUS);
gen6_gt_check_fifodbg(dev_priv);
}
 
4138,6 → 4361,13
return ret;
}
 
static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
/* something from same cacheline, but !FORCEWAKE_VLV */
POSTING_READ(FORCEWAKE_ACK_VLV);
}
 
static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
4144,7 → 4374,7
FORCEWAKE_ACK_TIMEOUT_MS))
DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
 
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
 
if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
FORCEWAKE_ACK_TIMEOUT_MS))
4155,11 → 4385,25
 
static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
/* The below doubles as a POSTING_READ */
I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
/* something from same cacheline, but !FORCEWAKE_VLV */
POSTING_READ(FORCEWAKE_ACK_VLV);
gen6_gt_check_fifodbg(dev_priv);
}
 
void intel_gt_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
 
if (IS_VALLEYVIEW(dev)) {
vlv_force_wake_reset(dev_priv);
} else if (INTEL_INFO(dev)->gen >= 6) {
__gen6_gt_force_wake_reset(dev_priv);
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
__gen6_gt_force_wake_mt_reset(dev_priv);
}
}
 
void intel_gt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
4166,38 → 4410,63
 
spin_lock_init(&dev_priv->gt_lock);
 
intel_gt_reset(dev);
 
if (IS_VALLEYVIEW(dev)) {
dev_priv->gt.force_wake_get = vlv_force_wake_get;
dev_priv->gt.force_wake_put = vlv_force_wake_put;
} else if (INTEL_INFO(dev)->gen >= 6) {
} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_mt_put;
} else if (IS_GEN6(dev)) {
dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
}
}
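 
The vfuncs installed above are always used in get/put pairs around GT MMIO access, keeping the GT power well awake for the duration. A usage sketch (the function name is hypothetical; gen6_gt_force_wake_get/put are the existing wrappers that dispatch through dev_priv->gt):
 
static u32 example_read_gt_register(struct drm_i915_private *dev_priv, u32 reg)
{
    u32 val;
 
    gen6_gt_force_wake_get(dev_priv); /* wake the GT power well */
    val = I915_READ(reg);
    gen6_gt_force_wake_put(dev_priv); /* allow it to sleep again */
 
    return val;
}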
 
/* IVB configs may use multi-threaded forcewake */
if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
u32 ecobus;
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
/* A small trick here - if the bios hasn't configured
* MT forcewake, and if the device is in RC6, then
* force_wake_mt_get will not wake the device and the
* ECOBUS read will return zero. Which will be
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = I915_READ_NOTRACE(ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
return -EAGAIN;
}
 
if (ecobus & FORCEWAKE_MT_ENABLE) {
DRM_DEBUG_KMS("Using MT version of forcewake\n");
dev_priv->gt.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->gt.force_wake_put =
__gen6_gt_force_wake_mt_put;
I915_WRITE(GEN6_PCODE_DATA, *val);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500)) {
DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
 
*val = I915_READ(GEN6_PCODE_DATA);
I915_WRITE(GEN6_PCODE_DATA, 0);
 
return 0;
}
 
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
 
if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
return -EAGAIN;
}
 
I915_WRITE(GEN6_PCODE_DATA, val);
I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
 
if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
500)) {
DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
return -ETIMEDOUT;
}
 
I915_WRITE(GEN6_PCODE_DATA, 0);
 
return 0;
}
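 
Both pcode helpers above WARN if rps.hw_lock is not held, so callers wrap the whole mailbox transaction in that mutex. A usage sketch (the function name is hypothetical; the mailbox constant appears in gen6_enable_rps() above):
 
static int example_read_oc_params(struct drm_i915_private *dev_priv, u32 *out)
{
    int ret;
 
    mutex_lock(&dev_priv->rps.hw_lock);
    ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, out);
    mutex_unlock(&dev_priv->rps.hw_lock);
 
    return ret;
}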
/drivers/video/drm/i915/intel_ringbuffer.c
47,7 → 47,7
 
static inline int ring_space(struct intel_ring_buffer *ring)
{
int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
return space;
247,7 → 247,7
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
 
ret = intel_ring_begin(ring, 4);
461,7 → 461,7
goto err_unref;
 
pc->gtt_offset = obj->gtt_offset;
pc->cpu_page = (void*)MapIoMem((addr_t)obj->pages.page[0], 4096, PG_SW);
pc->cpu_page = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096, PG_SW);
if (pc->cpu_page == NULL)
goto err_unpin;
 
502,13 → 502,25
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = init_ring_common(ring);
 
if (INTEL_INFO(dev)->gen > 3) {
if (INTEL_INFO(dev)->gen > 3)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
/* We need to disable the AsyncFlip performance optimisations in order
* to use MI_WAIT_FOR_EVENT within the CS. It should already be
* programmed to '1' on all products.
*/
if (INTEL_INFO(dev)->gen >= 6)
I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
 
/* Required for the hardware to program scanline values for waiting */
if (INTEL_INFO(dev)->gen == 6)
I915_WRITE(GFX_MODE,
_MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));
 
if (IS_GEN7(dev))
I915_WRITE(GFX_MODE_GEN7,
_MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
_MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
}
 
if (INTEL_INFO(dev)->gen >= 5) {
ret = init_pipe_control(ring);
552,15 → 564,11
 
static void
update_mboxes(struct intel_ring_buffer *ring,
u32 seqno,
u32 mmio_offset)
{
intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_REGISTER |
MI_SEMAPHORE_UPDATE);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
intel_ring_emit(ring, mmio_offset);
intel_ring_emit(ring, ring->outstanding_lazy_request);
}
 
/**
573,8 → 581,7
* This acts like a signal in the canonical semaphore.
*/
static int
gen6_add_request(struct intel_ring_buffer *ring,
u32 *seqno)
gen6_add_request(struct intel_ring_buffer *ring)
{
u32 mbox1_reg;
u32 mbox2_reg;
587,13 → 594,11
mbox1_reg = ring->signal_mbox[0];
mbox2_reg = ring->signal_mbox[1];
 
*seqno = i915_gem_next_request_seqno(ring);
 
update_mboxes(ring, *seqno, mbox1_reg);
update_mboxes(ring, *seqno, mbox2_reg);
update_mboxes(ring, mbox1_reg);
update_mboxes(ring, mbox2_reg);
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, *seqno);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
 
650,10 → 655,8
} while (0)
 
static int
pc_render_add_request(struct intel_ring_buffer *ring,
u32 *result)
pc_render_add_request(struct intel_ring_buffer *ring)
{
u32 seqno = i915_gem_next_request_seqno(ring);
struct pipe_control *pc = ring->private;
u32 scratch_addr = pc->gtt_offset + 128;
int ret;
674,7 → 677,7
PIPE_CONTROL_WRITE_FLUSH |
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
PIPE_CONTROL_FLUSH(ring, scratch_addr);
scratch_addr += 128; /* write to separate cachelines */
693,11 → 696,10
PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
PIPE_CONTROL_NOTIFY);
intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, 0);
intel_ring_advance(ring);
 
*result = seqno;
return 0;
}
 
885,10 → 887,8
}
 
static int
i9xx_add_request(struct intel_ring_buffer *ring,
u32 *result)
i9xx_add_request(struct intel_ring_buffer *ring)
{
u32 seqno;
int ret;
 
ret = intel_ring_begin(ring, 4);
895,15 → 895,12
if (ret)
return ret;
 
seqno = i915_gem_next_request_seqno(ring);
 
intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
intel_ring_emit(ring, seqno);
intel_ring_emit(ring, ring->outstanding_lazy_request);
intel_ring_emit(ring, MI_USER_INTERRUPT);
intel_ring_advance(ring);
 
*result = seqno;
return 0;
}
 
961,7 → 958,9
}
 
static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 length,
unsigned flags)
{
int ret;
 
972,7 → 971,7
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
MI_BATCH_GTT |
MI_BATCH_NON_SECURE_I965);
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
 
979,21 → 978,56
return 0;
}
 
/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
u32 offset, u32 len,
unsigned flags)
{
int ret;
 
if (flags & I915_DISPATCH_PINNED) {
ret = intel_ring_begin(ring, 4);
if (ret)
return ret;
 
intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, offset + len - 8);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
} else {
struct drm_i915_gem_object *obj = ring->private;
u32 cs_offset = obj->gtt_offset;
 
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
 
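/* 9 dwords for the blit-and-flush sequence below, plus 3 for the
 * batch dispatch. */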
ret = intel_ring_begin(ring, 9+3);
if (ret)
return ret;
/* Blit the batch (which now has all relocs applied) to the stable batch
* scratch bo area (so that the CS never stumbles over its tlb
* invalidation bug) ... */
intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB);
intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
intel_ring_emit(ring, cs_offset);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 4096);
intel_ring_emit(ring, offset);
intel_ring_emit(ring, MI_FLUSH);
 
/* ... and execute it. */
intel_ring_emit(ring, MI_BATCH_BUFFER);
intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_emit(ring, cs_offset + len - 8);
intel_ring_advance(ring);
}
 
return 0;
}
1000,7 → 1034,8
 
static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
u32 offset, u32 len,
unsigned flags)
{
int ret;
 
1009,7 → 1044,7
return ret;
 
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
intel_ring_advance(ring);
 
return 0;
1050,7 → 1085,7
}
 
ring->status_page.gfx_addr = obj->gtt_offset;
ring->status_page.page_addr = (void*)MapIoMem(obj->pages.page[0],4096,PG_SW);
ring->status_page.page_addr = (void*)MapIoMem((addr_t)sg_page(obj->pages->sgl),4096,PG_SW);
if (ring->status_page.page_addr == NULL) {
ret = -ENOMEM;
goto err_unpin;
1072,6 → 1107,29
return ret;
}
 
static int init_phys_hws_pga(struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
u32 addr;
 
if (!dev_priv->status_page_dmah) {
dev_priv->status_page_dmah =
drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return -ENOMEM;
}
 
addr = dev_priv->status_page_dmah->busaddr;
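/* On gen4+, bits 35:32 of the bus address are programmed through
 * bits 7:4 of HWS_PGA, so fold them in to keep high DMA addresses
 * reachable. */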
if (INTEL_INFO(ring->dev)->gen >= 4)
addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
I915_WRITE(HWS_PGA, addr);
 
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
return 0;
}
 
static int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
1083,6 → 1141,7
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
ring->size = 32 * PAGE_SIZE;
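/* Forget any semaphore seqnos previously seen from the other rings. */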
memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
 
init_waitqueue_head(&ring->irq_queue);
 
1090,6 → 1149,11
ret = init_status_page(ring);
if (ret)
return ret;
} else {
BUG_ON(ring->id != RCS);
ret = init_phys_hws_pga(ring);
if (ret)
return ret;
}
 
obj = i915_gem_alloc_object(dev, ring->size);
1154,7 → 1218,7
 
/* Disable the ring buffer. The ring must be idle at this point */
dev_priv = ring->dev->dev_private;
ret = intel_wait_ring_idle(ring);
ret = intel_ring_idle(ring);
if (ret)
DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
ring->name, ret);
1173,28 → 1237,6
// cleanup_status_page(ring);
}
 
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
 
if (ring->space < rem) {
int ret = intel_wait_ring_buffer(ring, rem);
if (ret)
return ret;
}
 
virt = ring->virtual_start + ring->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
 
ring->tail = 0;
ring->space = ring_space(ring);
 
return 0;
}
 
static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
int ret;
1228,7 → 1270,7
if (request->tail == -1)
continue;
 
space = request->tail - (ring->tail + 8);
space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
if (space < 0)
space += ring->size;
if (space >= n) {
1263,7 → 1305,7
return 0;
}
 
int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
1274,7 → 1316,7
if (ret != -ENOSPC)
return ret;
 
 
trace_i915_ring_wait_begin(ring);
/* With GEM the hangcheck timer should kick us out of the loop;
* leaving it early runs the risk of corrupting GEM state (due
* to running on almost untested codepaths). But on resume
1300,6 → 1342,60
return -EBUSY;
}
 
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
uint32_t __iomem *virt;
int rem = ring->size - ring->tail;
 
if (ring->space < rem) {
int ret = ring_wait_for_space(ring, rem);
if (ret)
return ret;
}
 
virt = ring->virtual_start + ring->tail;
rem /= 4;
while (rem--)
iowrite32(MI_NOOP, virt++);
 
ring->tail = 0;
ring->space = ring_space(ring);
 
return 0;
}
 
int intel_ring_idle(struct intel_ring_buffer *ring)
{
u32 seqno;
int ret;
 
/* We need to add any requests required to flush the objects and ring */
if (ring->outstanding_lazy_request) {
ret = i915_add_request(ring, NULL, NULL);
if (ret)
return ret;
}
 
/* Wait upon the last request to be completed */
if (list_empty(&ring->request_list))
return 0;
 
seqno = list_entry(ring->request_list.prev,
struct drm_i915_gem_request,
list)->seqno;
 
return i915_wait_seqno(ring, seqno);
}
 
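/* Reserve the outstanding lazy request (olr) seqno up front so that
 * command emission never has to allocate one mid-stream. */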
static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
if (ring->outstanding_lazy_request)
return 0;
 
return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
}
 
int intel_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
1311,6 → 1407,11
if (ret)
return ret;
 
/* Preallocate the olr before touching the ring */
ret = intel_ring_alloc_seqno(ring);
if (ret)
return ret;
 
if (unlikely(ring->tail + n > ring->effective_size)) {
ret = intel_wrap_ring_buffer(ring);
if (unlikely(ret))
1318,7 → 1419,7
}
 
if (unlikely(ring->space < n)) {
ret = intel_wait_ring_buffer(ring, n);
ret = ring_wait_for_space(ring, n);
if (unlikely(ret))
return ret;
}
1382,11 → 1483,18
return ret;
 
cmd = MI_FLUSH_DW;
/*
* Bspec vol 1c.5 - video engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_GPU_DOMAINS)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
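/* Aim the required post-sync store at the scratch slot of the HW
 * status page (STORE_INDEX addressing). */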
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
1393,8 → 1501,30
}
 
static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len,
unsigned flags)
{
int ret;
 
ret = intel_ring_begin(ring, 2);
if (ret)
return ret;
 
intel_ring_emit(ring,
MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
/* bits 0-7 are the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
 
return 0;
}
 
static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
u32 offset, u32 len)
u32 offset, u32 len,
unsigned flags)
{
int ret;
 
1402,7 → 1532,9
if (ret)
return ret;
 
intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
intel_ring_emit(ring,
MI_BATCH_BUFFER_START |
(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
/* bits 0-7 are the length on GEN6+ */
intel_ring_emit(ring, offset);
intel_ring_advance(ring);
1423,11 → 1555,18
return ret;
 
cmd = MI_FLUSH_DW;
/*
* Bspec vol 1c.3 - blitter engine command streamer:
* "If ENABLED, all TLBs will be invalidated once the flush
* operation is complete. This bit is only valid when the
* Post-Sync Operation field is a value of 1h or 3h."
*/
if (invalidate & I915_GEM_DOMAIN_RENDER)
cmd |= MI_INVALIDATE_TLB;
cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
MI_FLUSH_DW_OP_STOREDW;
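/* Same scheme as the video ring: the post-sync dword lands in the
 * status-page scratch slot. */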
intel_ring_emit(ring, cmd);
intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, 0);
intel_ring_emit(ring, MI_NOOP);
intel_ring_advance(ring);
return 0;
1481,7 → 1620,9
ring->irq_enable_mask = I915_USER_INTERRUPT;
}
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 6)
if (IS_HASWELL(dev))
ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 6)
ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
else if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1492,16 → 1633,99
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
 
/* Workaround batchbuffer to combat CS tlb bug. */
if (HAS_BROKEN_CS_TLB(dev)) {
struct drm_i915_gem_object *obj;
int ret;
 
if (!I915_NEED_GFX_HWS(dev)) {
ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
if (obj == NULL) {
DRM_ERROR("Failed to allocate batch bo\n");
return -ENOMEM;
}
 
ret = i915_gem_object_pin(obj, 0, true, false);
if (ret != 0) {
drm_gem_object_unreference(&obj->base);
DRM_ERROR("Failed to ping batch bo\n");
return ret;
}
 
ring->private = obj;
}
 
return intel_init_ring_buffer(dev, ring);
}
 
#if 0
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
int ret;
 
ring->name = "render ring";
ring->id = RCS;
ring->mmio_base = RENDER_RING_BASE;
 
if (INTEL_INFO(dev)->gen >= 6) {
/* non-kms not supported on gen6+ */
return -ENODEV;
}
 
/* Note: gem is not supported on gen5/ilk without kms (the corresponding
* gem_init ioctl returns with -ENODEV). Hence we do not need to set up
* the special gen5 functions. */
ring->add_request = i9xx_add_request;
if (INTEL_INFO(dev)->gen < 4)
ring->flush = gen2_render_ring_flush;
else
ring->flush = gen4_render_ring_flush;
ring->get_seqno = ring_get_seqno;
if (IS_GEN2(dev)) {
ring->irq_get = i8xx_ring_get_irq;
ring->irq_put = i8xx_ring_put_irq;
} else {
ring->irq_get = i9xx_ring_get_irq;
ring->irq_put = i9xx_ring_put_irq;
}
ring->irq_enable_mask = I915_USER_INTERRUPT;
ring->write_tail = ring_write_tail;
if (INTEL_INFO(dev)->gen >= 4)
ring->dispatch_execbuffer = i965_dispatch_execbuffer;
else if (IS_I830(dev) || IS_845G(dev))
ring->dispatch_execbuffer = i830_dispatch_execbuffer;
else
ring->dispatch_execbuffer = i915_dispatch_execbuffer;
ring->init = init_render_ring;
ring->cleanup = render_ring_cleanup;
 
ring->dev = dev;
INIT_LIST_HEAD(&ring->active_list);
INIT_LIST_HEAD(&ring->request_list);
 
ring->size = size;
ring->effective_size = ring->size;
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
 
ring->virtual_start = ioremap_wc(start, size);
if (ring->virtual_start == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
return -ENOMEM;
}
 
if (!I915_NEED_GFX_HWS(dev)) {
ret = init_phys_hws_pga(ring);
if (ret)
return ret;
}
 
return 0;
}
#endif
 
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
1547,7 → 1771,6
}
ring->init = init_ring_common;
 
 
return intel_init_ring_buffer(dev, ring);
}
 
/drivers/video/drm/i915/intel_ringbuffer.h
1,6 → 1,17
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
 
/*
* Gen2 BSpec "1. Programming Environment" / 1.4.4.6 "Ring Buffer Use"
* Gen3 BSpec "vol1c Memory Interface Functions" / 2.3.4.5 "Ring Buffer Use"
* Gen4+ BSpec "vol1c Memory Interface and Command Stream" / 5.3.4.5 "Ring Buffer Use"
*
* "If the Ring Buffer Head Pointer and the Tail Pointer are on the same
* cacheline, the Head Pointer must not be greater than the Tail
* Pointer."
*/
#define I915_RING_FREE_SPACE 64
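/*
 * A minimal sketch (not a driver helper) of how this reservation is
 * applied, mirroring ring_space() in intel_ringbuffer.c: head == tail
 * means "empty", and always keeping I915_RING_FREE_SPACE bytes unused
 * guarantees head never advances onto tail's cacheline:
 *
 *	int space = (head & HEAD_ADDR) - (tail + I915_RING_FREE_SPACE);
 *	if (space < 0)
 *		space += ring_size;
 */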
 
struct intel_hw_status_page {
u32 *page_addr;
unsigned int gfx_addr;
70,8 → 81,7
int __must_check (*flush)(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains);
int (*add_request)(struct intel_ring_buffer *ring,
u32 *seqno);
int (*add_request)(struct intel_ring_buffer *ring);
/* Some chipsets are not quite as coherent as advertised and need
* an expensive kick to force a true read of the up-to-date seqno.
* However, the up-to-date seqno is not always required and the last
81,7 → 91,10
u32 (*get_seqno)(struct intel_ring_buffer *ring,
bool lazy_coherency);
int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
u32 offset, u32 length);
u32 offset, u32 length,
unsigned flags);
#define I915_DISPATCH_SECURE 0x1
#define I915_DISPATCH_PINNED 0x2
void (*cleanup)(struct intel_ring_buffer *ring);
int (*sync_to)(struct intel_ring_buffer *ring,
struct intel_ring_buffer *to,
181,17 → 194,12
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define I915_GEM_HWS_INDEX 0x20
#define I915_GEM_HWS_SCRATCH_INDEX 0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
 
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
return intel_wait_ring_buffer(ring, ring->size - 8);
}
 
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
 
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
198,10 → 206,9
iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
 
void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
 
u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);
 
217,6 → 224,12
return ring->tail;
}
 
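/* Seqno of the request currently under construction; only valid once
 * intel_ring_begin() has preallocated the olr, hence the BUG_ON. */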
static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
BUG_ON(ring->outstanding_lazy_request == 0);
return ring->outstanding_lazy_request;
}
 
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
/drivers/video/drm/i915/intel_sdvo.c
27,7 → 27,7
*/
#include <linux/i2c.h>
#include <linux/slab.h>
//#include <linux/delay.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
518,7 → 518,7
static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
void *response, int response_len)
{
u8 retry = 5;
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i;
 
531,6 → 531,15
* command to be complete.
*
* Check 5 times in case the hardware failed to read the docs.
*
* Also beware that the first response by many devices is to
* reply PENDING and stall for time. TVs are notorious for
* requiring longer than specified to complete their replies.
* Originally (in the DDX long ago), the delay was only ever 15ms,
* with an additional 30ms delay for TVs added later, after
* many experiments. To accommodate both sets of delays, we do a
* sequence of slow checks if the device is falling behind and fails
* to reply within 5*15µs.
*/
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
537,8 → 546,12
&status))
goto log_fail;
 
while (status == SDVO_CMD_STATUS_PENDING && retry--) {
while (status == SDVO_CMD_STATUS_PENDING && --retry) {
if (retry < 10)
msleep(15);
else
udelay(15);
 
if (!intel_sdvo_read_byte(intel_sdvo,
SDVO_I2C_CMD_STATUS,
&status))
1237,6 → 1250,30
 
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) != 0) {
/* HW workaround for IBX: we need to move the port to
* transcoder A before disabling it. */
if (HAS_PCH_IBX(encoder->base.dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
 
if (temp & SDVO_PIPE_B_SELECT) {
temp &= ~SDVO_PIPE_B_SELECT;
I915_WRITE(intel_sdvo->sdvo_reg, temp);
POSTING_READ(intel_sdvo->sdvo_reg);
 
/* Again we need to write this twice. */
I915_WRITE(intel_sdvo->sdvo_reg, temp);
POSTING_READ(intel_sdvo->sdvo_reg);
 
/* Transcoder selection bits only update
* effectively on vblank. */
if (crtc)
intel_wait_for_vblank(encoder->base.dev, pipe);
else
msleep(50);
}
}
 
intel_sdvo_write_sdvox(intel_sdvo, temp & ~SDVO_ENABLE);
}
}
1253,8 → 1290,20
u8 status;
 
temp = I915_READ(intel_sdvo->sdvo_reg);
if ((temp & SDVO_ENABLE) == 0)
if ((temp & SDVO_ENABLE) == 0) {
/* HW workaround for IBX: we need to move the port
* to transcoder A before disabling it. */
if (HAS_PCH_IBX(dev)) {
struct drm_crtc *crtc = encoder->base.crtc;
int pipe = crtc ? to_intel_crtc(crtc)->pipe : -1;
 
/* Restore the transcoder select bit. */
if (pipe == PIPE_B)
temp |= SDVO_PIPE_B_SELECT;
}
 
intel_sdvo_write_sdvox(intel_sdvo, temp | SDVO_ENABLE);
}
for (i = 0; i < 2; i++)
intel_wait_for_vblank(dev, intel_crtc->pipe);
 
1508,17 → 1557,11
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
enum drm_connector_status ret;
 
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
if (!intel_sdvo_get_value(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS,
&response, 2))
return connector_status_unknown;
 
/* add 30ms delay when the output type might be TV */
if (intel_sdvo->caps.output_flags & SDVO_TV_MASK)
msleep(30);
 
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
 
DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
response & 0xff, response >> 8,
intel_sdvo_connector->output_flag);
1805,7 → 1848,7
intel_sdvo_destroy_enhance_property(connector);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
kfree(intel_sdvo_connector);
}
 
static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1837,7 → 1880,7
uint8_t cmd;
int ret;
 
ret = drm_connector_property_set_value(connector, property, val);
ret = drm_object_property_set_value(&connector->base, property, val);
if (ret)
return ret;
 
1894,7 → 1937,7
} else if (IS_TV_OR_LVDS(intel_sdvo_connector)) {
temp_value = val;
if (intel_sdvo_connector->left == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->right, val);
if (intel_sdvo_connector->left_margin == temp_value)
return 0;
1906,7 → 1949,7
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->right == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->left, val);
if (intel_sdvo_connector->right_margin == temp_value)
return 0;
1918,7 → 1961,7
cmd = SDVO_CMD_SET_OVERSCAN_H;
goto set_value;
} else if (intel_sdvo_connector->top == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->bottom, val);
if (intel_sdvo_connector->top_margin == temp_value)
return 0;
1930,7 → 1973,7
cmd = SDVO_CMD_SET_OVERSCAN_V;
goto set_value;
} else if (intel_sdvo_connector->bottom == property) {
drm_connector_property_set_value(connector,
drm_object_property_set_value(&connector->base,
intel_sdvo_connector->top, val);
if (intel_sdvo_connector->bottom_margin == temp_value)
return 0;
2003,7 → 2046,7
drm_mode_destroy(encoder->dev,
intel_sdvo->sdvo_lvds_fixed_mode);
 
// i2c_del_adapter(&intel_sdvo->ddc);
i2c_del_adapter(&intel_sdvo->ddc);
intel_encoder_destroy(encoder);
}
 
2083,17 → 2126,24
else
mapping = &dev_priv->sdvo_mappings[1];
 
if (mapping->initialized && intel_gmbus_is_port_valid(mapping->i2c_pin))
pin = mapping->i2c_pin;
else
pin = GMBUS_PORT_DPB;
if (mapping->initialized)
pin = mapping->i2c_pin;
 
if (intel_gmbus_is_port_valid(pin)) {
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin);
intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
 
/* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow
* our code totally fails once we start using gmbus. Hence fall back to
* bit banging for now. */
intel_gmbus_force_bit(sdvo->i2c, true);
} else {
sdvo->i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB);
}
 
/* undo any changes intel_sdvo_select_i2c_bus() did to sdvo->i2c */
static void
intel_sdvo_unselect_i2c_bus(struct intel_sdvo *sdvo)
{
intel_gmbus_force_bit(sdvo->i2c, false);
}
 
static bool
2438,7 → 2488,7
i, tv_format_names[intel_sdvo_connector->tv_format_supported[i]]);
 
intel_sdvo->tv_format_index = intel_sdvo_connector->tv_format_supported[0];
drm_connector_attach_property(&intel_sdvo_connector->base.base,
drm_object_attach_property(&intel_sdvo_connector->base.base.base,
intel_sdvo_connector->tv_format, 0);
return true;
 
2454,7 → 2504,7
intel_sdvo_connector->name = \
drm_property_create_range(dev, 0, #name, 0, data_value[0]); \
if (!intel_sdvo_connector->name) return false; \
drm_connector_attach_property(connector, \
drm_object_attach_property(&connector->base, \
intel_sdvo_connector->name, \
intel_sdvo_connector->cur_##name); \
DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
2491,7 → 2541,7
if (!intel_sdvo_connector->left)
return false;
 
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->left,
intel_sdvo_connector->left_margin);
 
2500,7 → 2550,7
if (!intel_sdvo_connector->right)
return false;
 
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->right,
intel_sdvo_connector->right_margin);
DRM_DEBUG_KMS("h_overscan: max %d, "
2528,7 → 2578,7
if (!intel_sdvo_connector->top)
return false;
 
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->top,
intel_sdvo_connector->top_margin);
 
2538,7 → 2588,7
if (!intel_sdvo_connector->bottom)
return false;
 
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->bottom,
intel_sdvo_connector->bottom_margin);
DRM_DEBUG_KMS("v_overscan: max %d, "
2570,7 → 2620,7
if (!intel_sdvo_connector->dot_crawl)
return false;
 
drm_connector_attach_property(connector,
drm_object_attach_property(&connector->base,
intel_sdvo_connector->dot_crawl,
intel_sdvo_connector->cur_dot_crawl);
DRM_DEBUG_KMS("dot crawl: current %d\n", response);
2655,7 → 2705,7
sdvo->ddc.algo_data = sdvo;
sdvo->ddc.algo = &intel_sdvo_ddc_proxy;
 
return 1; //i2c_add_adapter(&sdvo->ddc) == 0;
return i2c_add_adapter(&sdvo->ddc) == 0;
}
 
bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob)
2674,10 → 2724,8
intel_sdvo->is_sdvob = is_sdvob;
intel_sdvo->slave_addr = intel_sdvo_get_slave_addr(dev, intel_sdvo) >> 1;
intel_sdvo_select_i2c_bus(dev_priv, intel_sdvo, sdvo_reg);
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev)) {
kfree(intel_sdvo);
return false;
}
if (!intel_sdvo_init_ddc_proxy(intel_sdvo, dev))
goto err_i2c_bus;
 
/* encoder type will be decided later */
intel_encoder = &intel_sdvo->base;
2775,7 → 2823,9
 
err:
drm_encoder_cleanup(&intel_encoder->base);
// i2c_del_adapter(&intel_sdvo->ddc);
i2c_del_adapter(&intel_sdvo->ddc);
err_i2c_bus:
intel_sdvo_unselect_i2c_bus(intel_sdvo);
kfree(intel_sdvo);
 
return false;
/drivers/video/drm/i915/intel_sprite.c
48,7 → 48,8
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe;
u32 sprctl, sprscale = 0;
int pixel_size;
unsigned long sprsurf_offset, linear_offset;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
sprctl = I915_READ(SPRCTL(pipe));
 
61,33 → 62,24
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
pixel_size = 2;
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
sprctl |= SPRITE_FORMAT_RGBX888;
pixel_size = 4;
break;
BUG();
}
 
if (obj->tiling_mode != I915_TILING_NONE)
127,18 → 119,27
 
I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
if (obj->tiling_mode != I915_TILING_NONE) {
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
sprsurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
pixel_size, fb->pitches[0]);
linear_offset -= sprsurf_offset;
 
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev))
I915_WRITE(SPROFFSET(pipe), (y << 16) | x);
else if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
} else {
unsigned long offset;
else
I915_WRITE(SPRLINOFF(pipe), linear_offset);
 
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
I915_WRITE(SPRLINOFF(pipe), offset);
}
I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), sprscale);
I915_WRITE(SPRCTL(pipe), sprctl);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset + sprsurf_offset);
POSTING_READ(SPRSURF(pipe));
}
 
152,6 → 153,7
 
I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
/* Can't leave the scaler enabled... */
if (intel_plane->can_scale)
I915_WRITE(SPRSCALE(pipe), 0);
/* Activate double buffered register update */
I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
225,8 → 227,10
struct drm_device *dev = plane->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_plane *intel_plane = to_intel_plane(plane);
int pipe = intel_plane->pipe, pixel_size;
int pipe = intel_plane->pipe;
unsigned long dvssurf_offset, linear_offset;
u32 dvscntr, dvsscale;
int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
 
dvscntr = I915_READ(DVSCNTR(pipe));
 
239,33 → 243,24
switch (fb->pixel_format) {
case DRM_FORMAT_XBGR8888:
dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_XBGR;
pixel_size = 4;
break;
case DRM_FORMAT_XRGB8888:
dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break;
case DRM_FORMAT_YUYV:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
pixel_size = 2;
break;
case DRM_FORMAT_YVYU:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
pixel_size = 2;
break;
case DRM_FORMAT_UYVY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
pixel_size = 2;
break;
case DRM_FORMAT_VYUY:
dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
pixel_size = 2;
break;
default:
DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
dvscntr |= DVS_FORMAT_RGBX888;
pixel_size = 4;
break;
BUG();
}
 
if (obj->tiling_mode != I915_TILING_NONE)
289,18 → 284,22
 
I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
if (obj->tiling_mode != I915_TILING_NONE) {
 
linear_offset = y * fb->pitches[0] + x * pixel_size;
dvssurf_offset =
intel_gen4_compute_offset_xtiled(&x, &y,
pixel_size, fb->pitches[0]);
linear_offset -= dvssurf_offset;
 
if (obj->tiling_mode != I915_TILING_NONE)
I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
} else {
unsigned long offset;
else
I915_WRITE(DVSLINOFF(pipe), linear_offset);
 
offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
I915_WRITE(DVSLINOFF(pipe), offset);
}
I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
I915_WRITE(DVSSCALE(pipe), dvsscale);
I915_WRITE(DVSCNTR(pipe), dvscntr);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset + dvssurf_offset);
POSTING_READ(DVSSURF(pipe));
}
 
422,6 → 421,8
struct intel_framebuffer *intel_fb;
struct drm_i915_gem_object *obj, *old_obj;
int pipe = intel_plane->pipe;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
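/* PIPECONF is checked through the cpu transcoder below, since on newer
 * parts (e.g. HSW eDP) the transcoder need not match the pipe. */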
int ret = 0;
int x = src_x >> 16, y = src_y >> 16;
int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
436,7 → 437,7
src_h = src_h >> 16;
 
/* Pipe must be running... */
if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
if (!(I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE))
return -EINVAL;
 
if (crtc_x >= primary_w || crtc_y >= primary_h)
446,6 → 447,15
if (intel_plane->pipe != intel_crtc->pipe)
return -EINVAL;
 
/* Sprite planes can be linear or x-tiled surfaces */
switch (obj->tiling_mode) {
case I915_TILING_NONE:
case I915_TILING_X:
break;
default:
return -EINVAL;
}
 
/*
* Clamp the width & height into the visible area. Note we don't
* try to scale the source if part of the visible region is offscreen.
473,6 → 483,12
goto out;
 
/*
* We may not have a scaler; e.g. HSW does not have one any more
*/
if (!intel_plane->can_scale && (crtc_w != src_w || crtc_h != src_h))
return -EINVAL;
 
/*
* We can take a larger source and scale it down, but
* only so much... 16x is the max on SNB.
*/
570,8 → 586,6
struct intel_plane *intel_plane;
int ret = 0;
 
// if (!drm_core_check_feature(dev, DRIVER_MODESET))
// return -ENODEV;
 
/* Make sure we don't try to enable both src & dest simultaneously */
if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
603,8 → 617,6
struct intel_plane *intel_plane;
int ret = 0;
 
// if (!drm_core_check_feature(dev, DRIVER_MODESET))
// return -ENODEV;
 
mutex_lock(&dev->mode_config.mutex);
 
665,6 → 677,7
switch (INTEL_INFO(dev)->gen) {
case 5:
case 6:
intel_plane->can_scale = true;
intel_plane->max_downscale = 16;
intel_plane->update_plane = ilk_update_plane;
intel_plane->disable_plane = ilk_disable_plane;
681,6 → 694,10
break;
 
case 7:
if (IS_HASWELL(dev) || IS_VALLEYVIEW(dev))
intel_plane->can_scale = false;
else
intel_plane->can_scale = true;
intel_plane->max_downscale = 2;
intel_plane->update_plane = ivb_update_plane;
intel_plane->disable_plane = ivb_disable_plane;
/drivers/video/drm/i915/kms_display.c
368,7 → 368,9
 
main_device = dev;
 
#ifdef __HWA__
err = init_bitmaps();
#endif
 
return 0;
};
624,8 → 626,8
 
 
 
#ifdef __HWA__
 
 
extern struct hmm bm_mm;
 
 
1312,18 → 1314,18
#endif
 
 
#endif
 
 
 
 
 
 
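/* KolibriOS-side workqueue pump: drains cwq->worklist under the queue
 * lock, releasing the lock around each handler invocation. */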
void __stdcall run_workqueue(struct workqueue_struct *cwq)
{
unsigned long irqflags;
 
// dbgprintf("wq: %x head %x, next %x\n",
// cwq, &cwq->worklist, cwq->worklist.next);
dbgprintf("wq: %x head %x, next %x\n",
cwq, &cwq->worklist, cwq->worklist.next);
 
spin_lock_irqsave(&cwq->lock, irqflags);
 
1333,8 → 1335,8
struct work_struct, entry);
work_func_t f = work->func;
list_del_init(cwq->worklist.next);
// dbgprintf("head %x, next %x\n",
// &cwq->worklist, cwq->worklist.next);
dbgprintf("head %x, next %x\n",
&cwq->worklist, cwq->worklist.next);
 
spin_unlock_irqrestore(&cwq->lock, irqflags);
f(work);
1351,8 → 1353,8
{
unsigned long flags;
 
// dbgprintf("wq: %x, work: %x\n",
// wq, work );
dbgprintf("wq: %x, work: %x\n",
wq, work );
 
if(!list_empty(&work->entry))
return 0;
1365,8 → 1367,8
list_add_tail(&work->entry, &wq->worklist);
 
spin_unlock_irqrestore(&wq->lock, flags);
// dbgprintf("wq: %x head %x, next %x\n",
// wq, &wq->worklist, wq->worklist.next);
dbgprintf("wq: %x head %x, next %x\n",
wq, &wq->worklist, wq->worklist.next);
 
return 1;
};
1376,8 → 1378,8
struct delayed_work *dwork = (struct delayed_work *)__data;
struct workqueue_struct *wq = dwork->work.data;
 
// dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work );
dbgprintf("wq: %x, work: %x\n",
wq, &dwork->work );
 
__queue_work(wq, &dwork->work);
}
1398,8 → 1400,8
{
u32 flags;
 
// dbgprintf("wq: %x, work: %x\n",
// wq, &dwork->work );
dbgprintf("wq: %x, work: %x\n",
wq, &dwork->work );
 
if (delay == 0)
return __queue_work(wq, &dwork->work);
/drivers/video/drm/i915/main.c
53,8 → 53,8
 
if(!dbg_open(log))
{
// strcpy(log, "/tmp1/1/i915.log");
strcpy(log, "/RD/1/DRIVERS/i915.log");
// strcpy(log, "/BD1/2/i915.log");
 
if(!dbg_open(log))
{
62,7 → 62,7
return 0;
};
}
dbgprintf("i915 preview #08\n cmdline: %s\n", cmdline);
dbgprintf("i915 RC 10\n cmdline: %s\n", cmdline);
 
cpu_detect();
dbgprintf("\ncache line size %d\n", x86_clflush_size);
153,20 → 153,20
 
case SRV_CREATE_SURFACE:
// check_input(8);
retval = create_surface(main_device, (struct io_call_10*)inp);
// retval = create_surface(main_device, (struct io_call_10*)inp);
break;
 
case SRV_LOCK_SURFACE:
retval = lock_surface((struct io_call_12*)inp);
// retval = lock_surface((struct io_call_12*)inp);
break;
 
case SRV_RESIZE_SURFACE:
retval = resize_surface((struct io_call_14*)inp);
// retval = resize_surface((struct io_call_14*)inp);
break;
 
case SRV_BLIT_BITMAP:
srv_blit_bitmap( inp[0], inp[1], inp[2],
inp[3], inp[4], inp[5], inp[6]);
// case SRV_BLIT_BITMAP:
// srv_blit_bitmap( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
 
// blit_tex( inp[0], inp[1], inp[2],
// inp[3], inp[4], inp[5], inp[6]);
279,3 → 279,26
}
}
 
 
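/*
 * Capability query protocol (KolibriOS-specific hwcaps_t, as used below):
 * idx selects the record to fill; 0 returns generic option flags (none
 * advertised here), 1 returns the maximum texture dimensions, and any
 * other index reports failure. idx is set to 1 on return.
 */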
int get_driver_caps(hwcaps_t *caps)
{
int ret = 0;
 
switch(caps->idx)
{
case 0:
caps->opt[0] = 0;
caps->opt[1] = 0;
break;
 
case 1:
caps->cap1.max_tex_width = 4096;
caps->cap1.max_tex_height = 4096;
break;
default:
ret = 1;
};
caps->idx = 1;
return ret;
}