/drivers/video/drm/drm_fb_helper.c |
---|
File deleted |
/drivers/video/drm/include/types.h |
---|
0,0 → 1,236 |
#ifndef __TYPES_H__ |
#define __TYPES_H__ |
typedef int bool; |
#define false 0 |
#define true 1 |
typedef unsigned int size_t; |
typedef unsigned int count_t; |
typedef unsigned int addr_t; |
typedef unsigned char u8; |
typedef unsigned short u16; |
typedef unsigned int u32; |
typedef unsigned long long u64; |
typedef unsigned char __u8; |
typedef unsigned short __u16; |
typedef unsigned int __u32; |
typedef unsigned long long __u64; |
typedef signed char __s8; |
typedef signed short __s16; |
typedef signed int __s32; |
typedef signed long long __s64; |
typedef unsigned char uint8_t; |
typedef unsigned short uint16_t; |
typedef unsigned int uint32_t; |
typedef unsigned long long uint64_t; |
typedef unsigned char u8_t; |
typedef unsigned short u16_t; |
typedef unsigned int u32_t; |
typedef unsigned long long u64_t; |
typedef signed char int8_t; |
typedef signed long long int64_t; |
/* Fully parenthesized, matching <stddef.h>: bare (void*)0 misparses in
 * contexts such as `sizeof NULL`. */
#define NULL ((void*)0)
typedef uint32_t dma_addr_t; |
typedef uint32_t resource_size_t; |
#define __user |
#define cpu_to_le16(v16) (v16) |
#define cpu_to_le32(v32) (v32) |
#define cpu_to_le64(v64) (v64) |
#define le16_to_cpu(v16) (v16) |
#define le32_to_cpu(v32) (v32) |
#define le64_to_cpu(v64) (v64) |
#define likely(x) __builtin_expect(!!(x), 1) |
#define unlikely(x) __builtin_expect(!!(x), 0) |
#define BITS_PER_LONG 32 |
#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG) |
#define DECLARE_BITMAP(name,bits) \ |
unsigned long name[BITS_TO_LONGS(bits)] |
#define KERN_EMERG "<0>" /* system is unusable */ |
#define KERN_ALERT "<1>" /* action must be taken immediately */ |
#define KERN_CRIT "<2>" /* critical conditions */ |
#define KERN_ERR "<3>" /* error conditions */ |
#define KERN_WARNING "<4>" /* warning conditions */ |
#define KERN_NOTICE "<5>" /* normal but significant condition */ |
#define KERN_INFO "<6>" /* informational */ |
#define KERN_DEBUG "<7>" /* debug-level messages */ |
//int printk(const char *fmt, ...); |
#define printk(fmt, arg...) dbgprintf(fmt , ##arg) |
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */ |
#define DRM_INFO(fmt, arg...) dbgprintf("DRM: "fmt , ##arg) |
#define DRM_ERROR(fmt, arg...) \ |
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg) |
#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1) |
#define __must_be_array(a) \ |
BUILD_BUG_ON_ZERO(__builtin_types_compatible_p(typeof(a), typeof(&a[0]))) |
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
#ifndef HAVE_ARCH_BUG |
#define BUG() do { \ |
printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __FUNCTION__); \ |
/* panic("BUG!"); */ \ |
} while (0) |
#endif |
#ifndef HAVE_ARCH_BUG_ON |
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0) |
#endif |
#define MTRR_TYPE_UNCACHABLE 0 |
#define MTRR_TYPE_WRCOMB 1 |
#define MTRR_TYPE_WRTHROUGH 4 |
#define MTRR_TYPE_WRPROT 5 |
#define MTRR_TYPE_WRBACK 6 |
#define MTRR_NUM_TYPES 7 |
int dbgprintf(const char* format, ...); |
#define GFP_KERNEL 0 |
//#include <stdio.h> |
int snprintf(char *str, size_t size, const char *format, ...); |
//#include <string.h> |
void* memcpy(void *s1, const void *s2, size_t n); |
void* memset(void *s, int c, size_t n); |
size_t strlen(const char *s); |
char *strcpy(char *s1, const char *s2); |
char *strncpy (char *dst, const char *src, size_t len); |
void *malloc(size_t size); |
#define kmalloc(s,f) malloc((s)) |
#define kfree free |
/* Allocate @size bytes of zero-initialized memory.
 * The GFP @flags argument is accepted for Linux-API compatibility but is
 * ignored by this port (malloc backs all allocations).
 * Returns NULL on allocation failure. */
static inline void *kzalloc(size_t size, u32_t flags)
{
    void *ret = malloc(size);

    /* malloc() can fail: never memset() through a NULL pointer. */
    if (ret != NULL)
        memset(ret, 0, size);
    return ret;
}
struct drm_file; |
#define offsetof(TYPE,MEMBER) __builtin_offsetof(TYPE,MEMBER) |
#define container_of(ptr, type, member) ({ \ |
const typeof( ((type *)0)->member ) *__mptr = (ptr); \ |
(type *)( (char *)__mptr - offsetof(type,member) );}) |
#define DRM_MEMORYBARRIER() __asm__ __volatile__("lock; addl $0,0(%esp)") |
#define mb() __asm__ __volatile__("lock; addl $0,0(%esp)") |
#define PAGE_SIZE 4096 |
#define PAGE_SHIFT 12 |
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) |
static inline void bitmap_zero(unsigned long *dst, int nbits) |
{ |
if (nbits <= BITS_PER_LONG) |
*dst = 0UL; |
else { |
int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); |
memset(dst, 0, len); |
} |
} |
#define EXPORT_SYMBOL(x) |
#define min(x,y) ({ \ |
typeof(x) _x = (x); \ |
typeof(y) _y = (y); \ |
(void) (&_x == &_y); \ |
_x < _y ? _x : _y; }) |
#define max(x,y) ({ \ |
typeof(x) _x = (x); \ |
typeof(y) _y = (y); \ |
(void) (&_x == &_y); \ |
_x > _y ? _x : _y; }) |
extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor); |
# define do_div(n,base) ({ \ |
uint32_t __base = (base); \ |
uint32_t __rem; \ |
(void)(((typeof((n)) *)0) == ((uint64_t *)0)); \ |
if (likely(((n) >> 32) == 0)) { \ |
__rem = (uint32_t)(n) % __base; \ |
(n) = (uint32_t)(n) / __base; \ |
} else \ |
__rem = __div64_32(&(n), __base); \ |
__rem; \ |
}) |
#define lower_32_bits(n) ((u32)(n)) |
#define INT_MAX ((int)(~0U>>1)) |
#define INT_MIN (-INT_MAX - 1) |
#define UINT_MAX (~0U) |
#define LONG_MAX ((long)(~0UL>>1)) |
#define LONG_MIN (-LONG_MAX - 1) |
#define ULONG_MAX (~0UL) |
#define LLONG_MAX ((long long)(~0ULL>>1)) |
#define LLONG_MIN (-LLONG_MAX - 1) |
#define ULLONG_MAX (~0ULL) |
/* Allocate a zero-initialized array of @n elements of @size bytes each.
 * Returns NULL if n * size would overflow, or if allocation fails.
 * @flags is forwarded to kzalloc() (which currently ignores it). */
static inline void *kcalloc(size_t n, size_t size, u32_t flags)
{
    /* Guard the multiplication against unsigned wrap-around. */
    if (n != 0 && size > ULONG_MAX / n)
        return NULL;
    /* Forward the caller's flags instead of hard-coding 0, keeping the
     * signature honest and matching kzalloc()'s contract. */
    return kzalloc(n * size, flags);
}
#define ENTRY() dbgprintf("enter %s\n",__FUNCTION__) |
#define LEAVE() dbgprintf("leave %s\n",__FUNCTION__) |
#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) |
#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) |
#define PCI_DEVICE_ID_ATI_RADEON_QY 0x5159 |
#endif //__TYPES_H__ |
/drivers/video/drm/include/drmP.h |
---|
0,0 → 1,1555 |
/** |
* \file drmP.h |
* Private header for Direct Rendering Manager |
* |
* \author Rickard E. (Rik) Faith <faith@valinux.com> |
* \author Gareth Hughes <gareth@valinux.com> |
*/ |
/* |
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef _DRM_P_H_ |
#define _DRM_P_H_ |
#include <pci.h> |
#include <drm.h> |
#include <drm_edid.h> |
#include <drm_crtc.h> |
//#include <linux/idr.h> |
struct drm_file; |
struct drm_device; |
//#include "drm_os_linux.h" |
//#include "drm_hashtab.h" |
//#include "drm_mm.h" |
#define DRM_UT_CORE 0x01 |
#define DRM_UT_DRIVER 0x02 |
#define DRM_UT_KMS 0x04 |
#define DRM_UT_MODE 0x08 |
#define KHZ2PICOS(a) (1000000000UL/(a)) |
extern void drm_ut_debug_printk(unsigned int request_level, |
const char *prefix, |
const char *function_name, |
const char *format, ...); |
#define DRM_DEBUG_MODE(prefix, fmt, args...) \ |
do { \ |
dbgprintf("drm debug: %s" fmt, \ |
__func__, ##args); \ |
} while (0) |
#define DRM_DEBUG(fmt, arg...) \ |
printk("[" DRM_NAME ":%s] " fmt , __func__ , ##arg) |
/**
 * GEM (graphics memory) buffer object: the per-object state the DRM
 * core tracks for a graphics buffer — its size, global name and cache
 * read/write domain masks.
 */
struct drm_gem_object {
/** Reference count of this object */
// struct kref refcount;
/** Handle count of this object. Each handle also holds a reference */
// struct kref handlecount;
/** Related drm device */
struct drm_device *dev;
/** File representing the shmem storage */
// struct file *filp;
/* Mapping info for this object */
// struct drm_map_list map_list;
/**
 * Size of the object, in bytes. Immutable over the object's
 * lifetime.
 */
size_t size;
/**
 * Global name for this object, starts at 1. 0 means unnamed.
 * Access is covered by the object_name_lock in the related drm_device
 */
int name;
/**
 * Memory domains. These monitor which caches contain read/write data
 * related to the object. When transitioning from one set of domains
 * to another, the driver is called to ensure that caches are suitably
 * flushed and invalidated
 */
uint32_t read_domains;
uint32_t write_domain;
/**
 * While validating an exec operation, the
 * new read/write domain values are computed here.
 * They will be transferred to the above values
 * at the point that any cache flushing occurs
 */
uint32_t pending_read_domains;
uint32_t pending_write_domain;
/** Per-object private data owned by the driver */
void *driver_private;
};
#if 0 |
/***********************************************************************/ |
/** \name DRM template customization defaults */ |
/*@{*/ |
/* driver capabilities and requirements mask */ |
#define DRIVER_USE_AGP 0x1 |
#define DRIVER_REQUIRE_AGP 0x2 |
#define DRIVER_USE_MTRR 0x4 |
#define DRIVER_PCI_DMA 0x8 |
#define DRIVER_SG 0x10 |
#define DRIVER_HAVE_DMA 0x20 |
#define DRIVER_HAVE_IRQ 0x40 |
#define DRIVER_IRQ_SHARED 0x80 |
#define DRIVER_IRQ_VBL 0x100 |
#define DRIVER_DMA_QUEUE 0x200 |
#define DRIVER_FB_DMA 0x400 |
#define DRIVER_IRQ_VBL2 0x800 |
#define DRIVER_GEM 0x1000 |
#define DRIVER_MODESET 0x2000 |
/***********************************************************************/ |
/** \name Begin the DRM... */ |
/*@{*/ |
#define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then |
also include looping detection. */ |
#define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ |
#define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ |
#define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ |
#define DRM_LOOPING_LIMIT 5000000 |
#define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */ |
#define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ |
#define DRM_FLAG_DEBUG 0x01 |
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) |
#define DRM_MAP_HASH_OFFSET 0x10000000 |
/*@}*/ |
/***********************************************************************/ |
/** \name Macros to make printk easier */ |
/*@{*/ |
/** |
* Error output. |
* |
* \param fmt printf() like format string. |
* \param arg arguments |
*/ |
#define DRM_ERROR(fmt, arg...) \ |
printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg) |
/** |
* Memory error output. |
* |
* \param area memory area where the error occurred. |
* \param fmt printf() like format string. |
* \param arg arguments |
*/ |
#define DRM_MEM_ERROR(area, fmt, arg...) \ |
printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \ |
drm_mem_stats[area].name , ##arg) |
#define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) |
/** |
* Debug output. |
* |
* \param fmt printf() like format string. |
* \param arg arguments |
*/ |
#if DRM_DEBUG_CODE |
#define DRM_DEBUG(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \ |
__func__, fmt, ##args); \ |
} while (0) |
#define DRM_DEBUG_DRIVER(prefix, fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_DRIVER, prefix, \ |
__func__, fmt, ##args); \ |
} while (0) |
#define DRM_DEBUG_KMS(prefix, fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_KMS, prefix, \ |
__func__, fmt, ##args); \ |
} while (0) |
#define DRM_DEBUG_MODE(prefix, fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_MODE, prefix, \ |
__func__, fmt, ##args); \ |
} while (0) |
#define DRM_LOG(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_CORE, NULL, \ |
NULL, fmt, ##args); \ |
} while (0) |
#define DRM_LOG_KMS(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_KMS, NULL, \ |
NULL, fmt, ##args); \ |
} while (0) |
#define DRM_LOG_MODE(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_MODE, NULL, \ |
NULL, fmt, ##args); \ |
} while (0) |
#define DRM_LOG_DRIVER(fmt, args...) \ |
do { \ |
drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \ |
NULL, fmt, ##args); \ |
} while (0) |
#else |
#define DRM_DEBUG_DRIVER(prefix, fmt, args...) do { } while (0) |
#define DRM_DEBUG_KMS(prefix, fmt, args...) do { } while (0) |
#define DRM_DEBUG_MODE(prefix, fmt, args...) do { } while (0) |
#define DRM_DEBUG(fmt, arg...) do { } while (0) |
#define DRM_LOG(fmt, arg...) do { } while (0) |
#define DRM_LOG_KMS(fmt, args...) do { } while (0) |
#define DRM_LOG_MODE(fmt, arg...) do { } while (0) |
#define DRM_LOG_DRIVER(fmt, arg...) do { } while (0) |
#endif |
#define DRM_PROC_LIMIT (PAGE_SIZE-80) |
#define DRM_PROC_PRINT(fmt, arg...) \ |
len += sprintf(&buf[len], fmt , ##arg); \ |
if (len > DRM_PROC_LIMIT) { *eof = 1; return len - offset; } |
#define DRM_PROC_PRINT_RET(ret, fmt, arg...) \ |
len += sprintf(&buf[len], fmt , ##arg); \ |
if (len > DRM_PROC_LIMIT) { ret; *eof = 1; return len - offset; } |
/*@}*/ |
/***********************************************************************/ |
/** \name Internal types and structures */ |
/*@{*/ |
#define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) |
#define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) |
#define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) |
#define DRM_WAITCOUNT(dev,idx) DRM_BUFCOUNT(&dev->queuelist[idx]->waitlist) |
#define DRM_IF_VERSION(maj, min) (maj << 16 | min) |
/** |
* Get the private SAREA mapping. |
* |
* \param _dev DRM device. |
* \param _ctx context number. |
* \param _map output mapping. |
*/ |
#define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ |
(_map) = (_dev)->context_sareas[_ctx]; \ |
} while(0) |
/** |
* Test that the hardware lock is held by the caller, returning otherwise. |
* |
* \param dev DRM device. |
* \param filp file pointer of the caller. |
*/ |
#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ |
do { \ |
if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ |
_file_priv->master->lock.file_priv != _file_priv) { \ |
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ |
__func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ |
_file_priv->master->lock.file_priv, _file_priv); \ |
return -EINVAL; \ |
} \ |
} while (0) |
/** |
* Copy and IOCTL return string to user space |
*/ |
#define DRM_COPY( name, value ) \ |
len = strlen( value ); \ |
if ( len > name##_len ) len = name##_len; \ |
name##_len = strlen( value ); \ |
if ( len && name ) { \ |
if ( copy_to_user( name, value, len ) ) \ |
return -EFAULT; \ |
} |
/** |
* Ioctl function type. |
* |
* \param inode device inode. |
* \param file_priv DRM file private pointer. |
* \param cmd command. |
* \param arg argument. |
*/ |
typedef int drm_ioctl_t(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, |
unsigned long arg); |
#define DRM_AUTH 0x1 |
#define DRM_MASTER 0x2 |
#define DRM_ROOT_ONLY 0x4 |
#define DRM_CONTROL_ALLOW 0x8 |
struct drm_ioctl_desc { |
unsigned int cmd; |
int flags; |
drm_ioctl_t *func; |
}; |
/** |
* Creates a driver or general drm_ioctl_desc array entry for the given |
* ioctl, for use by drm_ioctl(). |
*/ |
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ |
[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags} |
struct drm_magic_entry { |
struct list_head head; |
struct drm_hash_item hash_item; |
struct drm_file *priv; |
}; |
struct drm_vma_entry { |
struct list_head head; |
struct vm_area_struct *vma; |
pid_t pid; |
}; |
/** |
* DMA buffer. |
*/ |
struct drm_buf { |
int idx; /**< Index into master buflist */ |
int total; /**< Buffer size */ |
int order; /**< log-base-2(total) */ |
int used; /**< Amount of buffer in use (for DMA) */ |
unsigned long offset; /**< Byte offset (used internally) */ |
void *address; /**< Address of buffer */ |
unsigned long bus_address; /**< Bus address of buffer */ |
struct drm_buf *next; /**< Kernel-only: used for free list */ |
__volatile__ int waiting; /**< On kernel DMA queue */ |
__volatile__ int pending; /**< On hardware DMA queue */ |
wait_queue_head_t dma_wait; /**< Processes waiting */ |
struct drm_file *file_priv; /**< Private of holding file descr */ |
int context; /**< Kernel queue for this buffer */ |
int while_locked; /**< Dispatch this buffer while locked */ |
enum { |
DRM_LIST_NONE = 0, |
DRM_LIST_FREE = 1, |
DRM_LIST_WAIT = 2, |
DRM_LIST_PEND = 3, |
DRM_LIST_PRIO = 4, |
DRM_LIST_RECLAIM = 5 |
} list; /**< Which list we're on */ |
int dev_priv_size; /**< Size of buffer private storage */ |
void *dev_private; /**< Per-buffer private storage */ |
}; |
/** bufs is one longer than it has to be */ |
struct drm_waitlist { |
int count; /**< Number of possible buffers */ |
struct drm_buf **bufs; /**< List of pointers to buffers */ |
struct drm_buf **rp; /**< Read pointer */ |
struct drm_buf **wp; /**< Write pointer */ |
struct drm_buf **end; /**< End pointer */ |
spinlock_t read_lock; |
spinlock_t write_lock; |
}; |
struct drm_freelist { |
int initialized; /**< Freelist in use */ |
atomic_t count; /**< Number of free buffers */ |
struct drm_buf *next; /**< End pointer */ |
wait_queue_head_t waiting; /**< Processes waiting on free bufs */ |
int low_mark; /**< Low water mark */ |
int high_mark; /**< High water mark */ |
atomic_t wfh; /**< If waiting for high mark */ |
spinlock_t lock; |
}; |
typedef struct drm_dma_handle { |
dma_addr_t busaddr; |
void *vaddr; |
size_t size; |
} drm_dma_handle_t; |
/** |
* Buffer entry. There is one of this for each buffer size order. |
*/ |
struct drm_buf_entry { |
int buf_size; /**< size */ |
int buf_count; /**< number of buffers */ |
struct drm_buf *buflist; /**< buffer list */ |
int seg_count; |
int page_order; |
struct drm_dma_handle **seglist; |
struct drm_freelist freelist; |
}; |
/** File private data */ |
struct drm_file { |
int authenticated; |
pid_t pid; |
uid_t uid; |
drm_magic_t magic; |
unsigned long ioctl_count; |
struct list_head lhead; |
struct drm_minor *minor; |
unsigned long lock_count; |
/** Mapping of mm object handles to object pointers. */ |
struct idr object_idr; |
/** Lock for synchronization of access to object_idr. */ |
spinlock_t table_lock; |
struct file *filp; |
void *driver_priv; |
int is_master; /* this file private is a master for a minor */ |
struct drm_master *master; /* master this node is currently associated with |
N.B. not always minor->master */ |
struct list_head fbs; |
}; |
/** Wait queue */ |
struct drm_queue { |
atomic_t use_count; /**< Outstanding uses (+1) */ |
atomic_t finalization; /**< Finalization in progress */ |
atomic_t block_count; /**< Count of processes waiting */ |
atomic_t block_read; /**< Queue blocked for reads */ |
wait_queue_head_t read_queue; /**< Processes waiting on block_read */ |
atomic_t block_write; /**< Queue blocked for writes */ |
wait_queue_head_t write_queue; /**< Processes waiting on block_write */ |
atomic_t total_queued; /**< Total queued statistic */ |
atomic_t total_flushed; /**< Total flushes statistic */ |
atomic_t total_locks; /**< Total locks statistics */ |
enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ |
struct drm_waitlist waitlist; /**< Pending buffers */ |
wait_queue_head_t flush_queue; /**< Processes waiting until flush */ |
}; |
/** |
* Lock data. |
*/ |
struct drm_lock_data { |
struct drm_hw_lock *hw_lock; /**< Hardware lock */ |
/** Private of lock holder's file (NULL=kernel) */ |
struct drm_file *file_priv; |
wait_queue_head_t lock_queue; /**< Queue of blocked processes */ |
unsigned long lock_time; /**< Time of last lock in jiffies */ |
spinlock_t spinlock; |
uint32_t kernel_waiters; |
uint32_t user_waiters; |
int idle_has_lock; |
}; |
/** |
* DMA data. |
*/ |
struct drm_device_dma { |
struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ |
int buf_count; /**< total number of buffers */ |
struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ |
int seg_count; |
int page_count; /**< number of pages */ |
unsigned long *pagelist; /**< page list */ |
unsigned long byte_count; |
enum { |
_DRM_DMA_USE_AGP = 0x01, |
_DRM_DMA_USE_SG = 0x02, |
_DRM_DMA_USE_FB = 0x04, |
_DRM_DMA_USE_PCI_RO = 0x08 |
} flags; |
}; |
/** |
* AGP memory entry. Stored as a doubly linked list. |
*/ |
struct drm_agp_mem { |
unsigned long handle; /**< handle */ |
DRM_AGP_MEM *memory; |
unsigned long bound; /**< address */ |
int pages; |
struct list_head head; |
}; |
/** |
* AGP data. |
* |
* \sa drm_agp_init() and drm_device::agp. |
*/ |
struct drm_agp_head { |
DRM_AGP_KERN agp_info; /**< AGP device information */ |
struct list_head memory; |
unsigned long mode; /**< AGP mode */ |
struct agp_bridge_data *bridge; |
int enabled; /**< whether the AGP bus as been enabled */ |
int acquired; /**< whether the AGP device has been acquired */ |
unsigned long base; |
int agp_mtrr; |
int cant_use_aperture; |
unsigned long page_mask; |
}; |
/** |
* Scatter-gather memory. |
*/ |
struct drm_sg_mem { |
unsigned long handle; |
void *virtual; |
int pages; |
struct page **pagelist; |
dma_addr_t *busaddr; |
}; |
struct drm_sigdata { |
int context; |
struct drm_hw_lock *lock; |
}; |
/** |
* Kernel side of a mapping |
*/ |
struct drm_local_map { |
resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ |
unsigned long size; /**< Requested physical size (bytes) */ |
enum drm_map_type type; /**< Type of memory to map */ |
enum drm_map_flags flags; /**< Flags */ |
void *handle; /**< User-space: "Handle" to pass to mmap() */ |
/**< Kernel-space: kernel-virtual address */ |
int mtrr; /**< MTRR slot used */ |
}; |
typedef struct drm_local_map drm_local_map_t; |
/** |
* Mappings list |
*/ |
struct drm_map_list { |
struct list_head head; /**< list head */ |
struct drm_hash_item hash; |
struct drm_local_map *map; /**< mapping */ |
uint64_t user_token; |
struct drm_master *master; |
struct drm_mm_node *file_offset_node; /**< fake offset */ |
}; |
/** |
* Context handle list |
*/ |
struct drm_ctx_list { |
struct list_head head; /**< list head */ |
drm_context_t handle; /**< context handle */ |
struct drm_file *tag; /**< associated fd private data */ |
}; |
/* location of GART table */ |
#define DRM_ATI_GART_MAIN 1 |
#define DRM_ATI_GART_FB 2 |
#define DRM_ATI_GART_PCI 1 |
#define DRM_ATI_GART_PCIE 2 |
#define DRM_ATI_GART_IGP 3 |
struct drm_ati_pcigart_info { |
int gart_table_location; |
int gart_reg_if; |
void *addr; |
dma_addr_t bus_addr; |
dma_addr_t table_mask; |
struct drm_dma_handle *table_handle; |
struct drm_local_map mapping; |
int table_size; |
}; |
/** |
* GEM specific mm private for tracking GEM objects |
*/ |
struct drm_gem_mm { |
struct drm_mm offset_manager; /**< Offset mgmt for buffer objects */ |
struct drm_open_hash offset_hash; /**< User token hash table for maps */ |
}; |
/**
 * NOTE(review): this copy of struct drm_gem_object lies inside the
 * "#if 0" region opened earlier in the file, so it is compiled out;
 * an abridged, enabled copy of the same structure appears above.
 * If the #if 0 block is ever re-enabled, one of the two definitions
 * must be removed to avoid a redefinition error.
 */
struct drm_gem_object {
/** Reference count of this object */
struct kref refcount;
/** Handle count of this object. Each handle also holds a reference */
struct kref handlecount;
/** Related drm device */
struct drm_device *dev;
/** File representing the shmem storage */
struct file *filp;
/* Mapping info for this object */
struct drm_map_list map_list;
/**
 * Size of the object, in bytes. Immutable over the object's
 * lifetime.
 */
size_t size;
/**
 * Global name for this object, starts at 1. 0 means unnamed.
 * Access is covered by the object_name_lock in the related drm_device
 */
int name;
/**
 * Memory domains. These monitor which caches contain read/write data
 * related to the object. When transitioning from one set of domains
 * to another, the driver is called to ensure that caches are suitably
 * flushed and invalidated
 */
uint32_t read_domains;
uint32_t write_domain;
/**
 * While validating an exec operation, the
 * new read/write domain values are computed here.
 * They will be transferred to the above values
 * at the point that any cache flushing occurs
 */
uint32_t pending_read_domains;
uint32_t pending_write_domain;
void *driver_private;
};
#include "drm_crtc.h" |
/* per-master structure */ |
struct drm_master { |
struct kref refcount; /* refcount for this master */ |
struct list_head head; /**< each minor contains a list of masters */ |
struct drm_minor *minor; /**< link back to minor we are a master for */ |
char *unique; /**< Unique identifier: e.g., busid */ |
int unique_len; /**< Length of unique field */ |
int unique_size; /**< amount allocated */ |
int blocked; /**< Blocked due to VC switch? */ |
/** \name Authentication */ |
/*@{ */ |
struct drm_open_hash magiclist; |
struct list_head magicfree; |
/*@} */ |
struct drm_lock_data lock; /**< Information on hardware lock */ |
void *driver_priv; /**< Private structure for driver to use */ |
}; |
/** |
* DRM driver structure. This structure represent the common code for |
* a family of cards. There will one drm_device for each card present |
* in this family |
*/ |
struct drm_driver { |
int (*load) (struct drm_device *, unsigned long flags); |
int (*firstopen) (struct drm_device *); |
int (*open) (struct drm_device *, struct drm_file *); |
void (*preclose) (struct drm_device *, struct drm_file *file_priv); |
void (*postclose) (struct drm_device *, struct drm_file *); |
void (*lastclose) (struct drm_device *); |
int (*unload) (struct drm_device *); |
int (*suspend) (struct drm_device *, pm_message_t state); |
int (*resume) (struct drm_device *); |
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); |
void (*dma_ready) (struct drm_device *); |
int (*dma_quiescent) (struct drm_device *); |
int (*context_ctor) (struct drm_device *dev, int context); |
int (*context_dtor) (struct drm_device *dev, int context); |
int (*kernel_context_switch) (struct drm_device *dev, int old, |
int new); |
void (*kernel_context_switch_unlock) (struct drm_device *dev); |
/** |
* get_vblank_counter - get raw hardware vblank counter |
* @dev: DRM device |
* @crtc: counter to fetch |
* |
* Driver callback for fetching a raw hardware vblank counter |
* for @crtc. If a device doesn't have a hardware counter, the |
* driver can simply return the value of drm_vblank_count and |
* make the enable_vblank() and disable_vblank() hooks into no-ops, |
* leaving interrupts enabled at all times. |
* |
* Wraparound handling and loss of events due to modesetting is dealt |
* with in the DRM core code. |
* |
* RETURNS |
* Raw vblank counter value. |
*/ |
u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); |
/** |
* enable_vblank - enable vblank interrupt events |
* @dev: DRM device |
* @crtc: which irq to enable |
* |
* Enable vblank interrupts for @crtc. If the device doesn't have |
* a hardware vblank counter, this routine should be a no-op, since |
* interrupts will have to stay on to keep the count accurate. |
* |
* RETURNS |
* Zero on success, appropriate errno if the given @crtc's vblank |
* interrupt cannot be enabled. |
*/ |
int (*enable_vblank) (struct drm_device *dev, int crtc); |
/** |
* disable_vblank - disable vblank interrupt events |
* @dev: DRM device |
* @crtc: which irq to enable |
* |
* Disable vblank interrupts for @crtc. If the device doesn't have |
* a hardware vblank counter, this routine should be a no-op, since |
* interrupts will have to stay on to keep the count accurate. |
*/ |
void (*disable_vblank) (struct drm_device *dev, int crtc); |
/** |
* Called by \c drm_device_is_agp. Typically used to determine if a |
* card is really attached to AGP or not. |
* |
* \param dev DRM device handle |
* |
* \returns |
* One of three values is returned depending on whether or not the |
* card is absolutely \b not AGP (return of 0), absolutely \b is AGP |
* (return of 1), or may or may not be AGP (return of 2). |
*/ |
int (*device_is_agp) (struct drm_device *dev); |
/* these have to be filled in */ |
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); |
void (*irq_preinstall) (struct drm_device *dev); |
int (*irq_postinstall) (struct drm_device *dev); |
void (*irq_uninstall) (struct drm_device *dev); |
void (*reclaim_buffers) (struct drm_device *dev, |
struct drm_file * file_priv); |
void (*reclaim_buffers_locked) (struct drm_device *dev, |
struct drm_file *file_priv); |
void (*reclaim_buffers_idlelocked) (struct drm_device *dev, |
struct drm_file *file_priv); |
resource_size_t (*get_map_ofs) (struct drm_local_map * map); |
resource_size_t (*get_reg_ofs) (struct drm_device *dev); |
void (*set_version) (struct drm_device *dev, |
struct drm_set_version *sv); |
/* Master routines */ |
int (*master_create)(struct drm_device *dev, struct drm_master *master); |
void (*master_destroy)(struct drm_device *dev, struct drm_master *master); |
int (*proc_init)(struct drm_minor *minor); |
void (*proc_cleanup)(struct drm_minor *minor); |
int (*debugfs_init)(struct drm_minor *minor); |
void (*debugfs_cleanup)(struct drm_minor *minor); |
/** |
* Driver-specific constructor for drm_gem_objects, to set up |
* obj->driver_private. |
* |
* Returns 0 on success. |
*/ |
int (*gem_init_object) (struct drm_gem_object *obj); |
void (*gem_free_object) (struct drm_gem_object *obj); |
/* Driver private ops for this object */ |
struct vm_operations_struct *gem_vm_ops; |
int major; |
int minor; |
int patchlevel; |
char *name; |
char *desc; |
char *date; |
u32 driver_features; |
int dev_priv_size; |
struct drm_ioctl_desc *ioctls; |
int num_ioctls; |
struct file_operations fops; |
struct pci_driver pci_driver; |
/* List of devices hanging off this driver */ |
struct list_head device_list; |
}; |
#define DRM_MINOR_UNASSIGNED 0 |
#define DRM_MINOR_LEGACY 1 |
#define DRM_MINOR_CONTROL 2 |
#define DRM_MINOR_RENDER 3 |
/** |
* debugfs node list. This structure represents a debugfs file to |
* be created by the drm core |
*/ |
struct drm_debugfs_list { |
const char *name; /** file name */ |
int (*show)(struct seq_file*, void*); /** show callback */ |
u32 driver_features; /**< Required driver features for this entry */ |
}; |
/** |
* debugfs node structure. This structure represents a debugfs file. |
*/ |
struct drm_debugfs_node { |
struct list_head list; |
struct drm_minor *minor; |
struct drm_debugfs_list *debugfs_ent; |
struct dentry *dent; |
}; |
/** |
* Info file list entry. This structure represents a debugfs or proc file to |
* be created by the drm core |
*/ |
struct drm_info_list { |
const char *name; /** file name */ |
int (*show)(struct seq_file*, void*); /** show callback */ |
u32 driver_features; /**< Required driver features for this entry */ |
void *data; |
}; |
/** |
* debugfs node structure. This structure represents a debugfs file. |
*/ |
struct drm_info_node { |
struct list_head list; |
struct drm_minor *minor; |
struct drm_info_list *info_ent; |
struct dentry *dent; |
}; |
/**
 * DRM minor structure. This structure represents a drm minor number.
 */
struct drm_minor {
	int index;			/**< Minor device number */
	int type;			/**< Control or render */
	dev_t device;			/**< Device number for mknod */
	struct device kdev;		/**< Linux device */
	struct drm_device *dev;		/**< drm device this minor exposes */
	struct proc_dir_entry *proc_root;  /**< proc directory entry */
	struct drm_info_node proc_nodes;   /**< list head of created proc info nodes */
	struct dentry *debugfs_root;	   /**< debugfs directory for this minor */
	struct drm_info_node debugfs_nodes; /**< list head of created debugfs info nodes */
	struct drm_master *master; /* currently active master for this node */
	struct list_head master_list;	/**< list of masters on this minor */
	struct drm_mode_group mode_group;  /**< mode objects visible through this minor */
};
#endif |
/**
 * DRM device structure. This structure represents a complete card that
 * may contain multiple heads.
 *
 * Many members of the upstream Linux structure are commented out in this
 * port (locks, atomics, wait queues, AGP/SG state); the surviving fields
 * keep their upstream layout and meaning.
 */
struct drm_device {
	struct list_head driver_item;	/**< list of devices per driver */
	char *devname;			/**< For /proc/interrupts */
	int if_version;			/**< Highest interface version set */

	/** \name Locks */
	/*@{ */
//       spinlock_t count_lock;          /**< For inuse, drm_device::open_count, drm_device::buf_use */
//       struct mutex struct_mutex;      /**< For others */
	/*@} */

	/** \name Usage Counters */
	/*@{ */
	int open_count;			/**< Outstanding files open */
//       atomic_t ioctl_count;           /**< Outstanding IOCTLs pending */
//       atomic_t vma_count;             /**< Outstanding vma areas open */
	int buf_use;			/**< Buffers in use -- cannot alloc */
//       atomic_t buf_alloc;             /**< Buffer allocation in progress */
	/*@} */

	/** \name Performance counters */
	/*@{ */
	unsigned long counters;	/* NOTE(review): meaning unclear with the stat arrays below disabled — TODO confirm */
//       enum drm_stat_type types[15];
//       atomic_t counts[15];
	/*@} */

	struct list_head filelist;	/* presumably the list of open drm_file structures — verify against drm_open() */

	/** \name Memory management */
	/*@{ */
	struct list_head maplist;	/**< Linked list of regions */
	int map_count;			/**< Number of mappable regions */
//       struct drm_open_hash map_hash;  /**< User token hash table for maps */

	/** \name Context handle management */
	/*@{ */
	struct list_head ctxlist;	/**< Linked list of context handles */
	int ctx_count;			/**< Number of context handles */
//       struct mutex ctxlist_mutex;     /**< For ctxlist */
//       struct idr ctx_idr;
	struct list_head vmalist;	/**< List of vmas (for debugging) */
	/*@} */

	/** \name DMA queues (contexts) */
	/*@{ */
	int queue_count;		/**< Number of active DMA queues */
	int queue_reserved;		/**< Number of reserved DMA queues */
	int queue_slots;		/**< Actual length of queuelist */
//       struct drm_queue **queuelist;   /**< Vector of pointers to DMA queues */
//       struct drm_device_dma *dma;             /**< Optional pointer for DMA support */
	/*@} */

	/** \name Context support */
	/*@{ */
	int irq_enabled;		/**< True if irq handler is enabled */
	__volatile__ long context_flag;	/**< Context swapping flag */
	__volatile__ long interrupt_flag; /**< Interruption handler flag */
	__volatile__ long dma_flag;	/**< DMA dispatch flag */
//       struct timer_list timer;        /**< Timer for delaying ctx switch */
//       wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */
	int last_checked;		/**< Last context checked for DMA */
	int last_context;		/**< Last current context */
	unsigned long last_switch;	/**< jiffies at last context switch */
	/*@} */

//       struct work_struct work;

	/** \name VBLANK IRQ support */
	/*@{ */
	/*
	 * At load time, disabling the vblank interrupt won't be allowed since
	 * old clients may not call the modeset ioctl and therefore misbehave.
	 * Once the modeset ioctl *has* been called though, we can safely
	 * disable them when unused.
	 */
	int vblank_disable_allowed;
//       wait_queue_head_t *vbl_queue;   /**< VBLANK wait queue */
//       atomic_t *_vblank_count;        /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
//       spinlock_t vbl_lock;
//       atomic_t *vblank_refcount;      /* number of users of vblank interruptsper crtc */
	u32 *last_vblank;		/* protected by dev->vbl_lock, used */
					/* for wraparound handling */
	int *vblank_enabled;		/* so we don't call enable more than
					   once per disable */
	int *vblank_inmodeset;		/* Display driver is setting mode */
	u32 *last_vblank_wait;		/* Last vblank seqno waited per CRTC */
//       struct timer_list vblank_disable_timer;

	u32 max_vblank_count;		/**< size of vblank counter register */
	/*@} */

//       cycles_t ctx_start;
//       cycles_t lck_start;
//
//       struct fasync_struct *buf_async;/**< Processes waiting for SIGIO */
//       wait_queue_head_t buf_readers;  /**< Processes waiting to read */
//       wait_queue_head_t buf_writers;  /**< Processes waiting to ctx switch */
//
//       struct drm_agp_head *agp;       /**< AGP data */

	struct pci_dev *pdev;		/**< PCI device structure */
	int pci_vendor;			/**< PCI vendor id */
	int pci_device;			/**< PCI device id */
//       struct drm_sg_mem *sg;  /**< Scatter gather memory */
	int num_crtcs;			/**< Number of CRTCs on this device */
	void *dev_private;		/**< device private data */
	void *mm_private;		/* memory-manager private data — TODO confirm owner */
//       struct address_space *dev_mapping;
//       struct drm_sigdata sigdata;        /**< For block_all_signals */
//       sigset_t sigmask;
//
//       struct drm_driver *driver;
//       struct drm_local_map *agp_buffer_map;
//       unsigned int agp_buffer_token;
//       struct drm_minor *control;              /**< Control node for card */
//       struct drm_minor *primary;              /**< render type primary screen head */

	/** \name Drawable information */
	/*@{ */
//       spinlock_t drw_lock;
//       struct idr drw_idr;
	/*@} */

	struct drm_mode_config mode_config;	/**< Current mode config */

	/** \name GEM information */
	/*@{ */
//       spinlock_t object_name_lock;
//       struct idr object_name_idr;
//       atomic_t object_count;
//       atomic_t object_memory;
//       atomic_t pin_count;
//       atomic_t pin_memory;
//       atomic_t gtt_count;
//       atomic_t gtt_memory;
//       uint32_t gtt_total;
	uint32_t invalidate_domains;    /* domains pending invalidation */
	uint32_t flush_domains;         /* domains pending flush */
	/*@} */

};
#if 0 |
/** Return the IRQ number assigned to this device's PCI function. */
static inline int drm_dev_to_irq(struct drm_device *dev)
{
	return dev->pdev->irq;
}
/** Test whether the driver advertises @feature in its driver_features mask (1 = yes, 0 = no). */
static __inline__ int drm_core_check_feature(struct drm_device *dev,
					     int feature)
{
	return ((dev->driver->driver_features & feature) ? 1 : 0);
}
#ifdef __alpha__ |
#define drm_get_pci_domain(dev) dev->hose->index |
#else |
#define drm_get_pci_domain(dev) 0 |
#endif |
#if __OS_HAS_AGP |
/** Nonzero when the driver was built with AGP support (DRIVER_USE_AGP set). */
static inline int drm_core_has_AGP(struct drm_device *dev)
{
	return drm_core_check_feature(dev, DRIVER_USE_AGP);
}
#else |
#define drm_core_has_AGP(dev) (0) |
#endif |
#if __OS_HAS_MTRR |
/** Nonzero when the driver requests MTRR setup (DRIVER_USE_MTRR set). */
static inline int drm_core_has_MTRR(struct drm_device *dev)
{
	return drm_core_check_feature(dev, DRIVER_USE_MTRR);
}
#define DRM_MTRR_WC MTRR_TYPE_WRCOMB |
/** Register an MTRR over [offset, offset+size); thin wrapper around mtrr_add() with increment=1. */
static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
			       unsigned int flags)
{
	return mtrr_add(offset, size, flags, 1);
}
/** Remove a previously added MTRR; @flags is accepted for API symmetry but ignored. */
static inline int drm_mtrr_del(int handle, unsigned long offset,
			       unsigned long size, unsigned int flags)
{
	return mtrr_del(handle, offset, size);
}
#else |
#define drm_core_has_MTRR(dev) (0) |
#define DRM_MTRR_WC 0 |
/** No-MTRR stub: always reports success without touching hardware. */
static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
			       unsigned int flags)
{
	return 0;
}
/** No-MTRR stub: always reports success without touching hardware. */
static inline int drm_mtrr_del(int handle, unsigned long offset,
			       unsigned long size, unsigned int flags)
{
	return 0;
}
#endif |
/******************************************************************/ |
/** \name Internal function definitions */ |
/*@{*/ |
/* Driver support (drm_drv.h) */ |
extern int drm_init(struct drm_driver *driver); |
extern void drm_exit(struct drm_driver *driver); |
extern int drm_ioctl(struct inode *inode, struct file *filp, |
unsigned int cmd, unsigned long arg); |
extern long drm_compat_ioctl(struct file *filp, |
unsigned int cmd, unsigned long arg); |
extern int drm_lastclose(struct drm_device *dev); |
/* Device support (drm_fops.h) */ |
extern int drm_open(struct inode *inode, struct file *filp); |
extern int drm_stub_open(struct inode *inode, struct file *filp); |
extern int drm_fasync(int fd, struct file *filp, int on); |
extern int drm_release(struct inode *inode, struct file *filp); |
/* Mapping support (drm_vm.h) */ |
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); |
extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma); |
extern void drm_vm_open_locked(struct vm_area_struct *vma); |
extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map); |
extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev); |
extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); |
/* Memory management support (drm_memory.h) */ |
#include "drm_memory.h" |
extern void drm_mem_init(void); |
extern int drm_mem_info(char *buf, char **start, off_t offset, |
int request, int *eof, void *data); |
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); |
extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); |
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); |
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); |
extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, |
struct page **pages, |
unsigned long num_pages, |
uint32_t gtt_offset, |
uint32_t type); |
extern int drm_unbind_agp(DRM_AGP_MEM * handle); |
/* Misc. IOCTL support (drm_ioctl.h) */ |
extern int drm_irq_by_busid(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getunique(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setunique(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getmap(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getclient(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getstats(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_setversion(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_noop(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Context IOCTL support (drm_context.h) */ |
extern int drm_resctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_addctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_modctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_switchctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_newctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmctx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_ctxbitmap_init(struct drm_device *dev); |
extern void drm_ctxbitmap_cleanup(struct drm_device *dev); |
extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); |
extern int drm_setsareactx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_getsareactx(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Drawable IOCTL support (drm_drawable.h) */ |
extern int drm_adddraw(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmdraw(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_update_drawable_info(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, |
drm_drawable_t id); |
extern void drm_drawable_free_all(struct drm_device *dev); |
/* Authentication IOCTL support (drm_auth.h) */ |
extern int drm_getmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_authmagic(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* Cache management (drm_cache.c) */ |
void drm_clflush_pages(struct page *pages[], unsigned long num_pages); |
/* Locking IOCTL support (drm_lock.h) */ |
extern int drm_lock(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_unlock(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); |
extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); |
extern void drm_idlelock_take(struct drm_lock_data *lock_data); |
extern void drm_idlelock_release(struct drm_lock_data *lock_data); |
/* |
* These are exported to drivers so that they can implement fencing using |
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock. |
*/ |
extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv); |
/* Buffer management support (drm_bufs.h) */ |
extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); |
extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); |
extern int drm_addmap(struct drm_device *dev, resource_size_t offset, |
unsigned int size, enum drm_map_type type, |
enum drm_map_flags flags, struct drm_local_map **map_ptr); |
extern int drm_addmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map); |
extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map); |
extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_addbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_infobufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_markbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_freebufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_mapbufs(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_order(unsigned long size); |
extern resource_size_t drm_get_resource_start(struct drm_device *dev, |
unsigned int resource); |
extern resource_size_t drm_get_resource_len(struct drm_device *dev, |
unsigned int resource); |
/* DMA support (drm_dma.h) */ |
extern int drm_dma_setup(struct drm_device *dev); |
extern void drm_dma_takedown(struct drm_device *dev); |
extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); |
extern void drm_core_reclaim_buffers(struct drm_device *dev, |
struct drm_file *filp); |
/* IRQ support (drm_irq.h) */ |
extern int drm_control(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); |
extern int drm_irq_install(struct drm_device *dev); |
extern int drm_irq_uninstall(struct drm_device *dev); |
extern void drm_driver_irq_preinstall(struct drm_device *dev); |
extern void drm_driver_irq_postinstall(struct drm_device *dev); |
extern void drm_driver_irq_uninstall(struct drm_device *dev); |
extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); |
extern int drm_wait_vblank(struct drm_device *dev, void *data, |
struct drm_file *filp); |
extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); |
extern u32 drm_vblank_count(struct drm_device *dev, int crtc); |
extern void drm_handle_vblank(struct drm_device *dev, int crtc); |
extern int drm_vblank_get(struct drm_device *dev, int crtc); |
extern void drm_vblank_put(struct drm_device *dev, int crtc); |
extern void drm_vblank_cleanup(struct drm_device *dev); |
/* Modesetting support */ |
extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); |
extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); |
extern int drm_modeset_ctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* AGP/GART support (drm_agpsupport.h) */ |
extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); |
extern int drm_agp_acquire(struct drm_device *dev); |
extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_release(struct drm_device *dev); |
extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); |
extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); |
extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); |
extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); |
extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); |
extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); |
extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size_t pages, u32 type); |
extern int drm_agp_free_memory(DRM_AGP_MEM * handle); |
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); |
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); |
extern void drm_agp_chipset_flush(struct drm_device *dev); |
/* Stub support (drm_stub.h) */ |
extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
struct drm_master *drm_master_create(struct drm_minor *minor); |
extern struct drm_master *drm_master_get(struct drm_master *master); |
extern void drm_master_put(struct drm_master **master); |
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, |
struct drm_driver *driver); |
extern void drm_put_dev(struct drm_device *dev); |
extern int drm_put_minor(struct drm_minor **minor); |
extern unsigned int drm_debug; |
extern struct class *drm_class; |
extern struct proc_dir_entry *drm_proc_root; |
extern struct dentry *drm_debugfs_root; |
extern struct idr drm_minors_idr; |
extern struct drm_local_map *drm_getsarea(struct drm_device *dev); |
/* Proc support (drm_proc.h) */ |
extern int drm_proc_init(struct drm_minor *minor, int minor_id, |
struct proc_dir_entry *root); |
extern int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root); |
/* Debugfs support */ |
#if defined(CONFIG_DEBUG_FS) |
extern int drm_debugfs_init(struct drm_minor *minor, int minor_id, |
struct dentry *root); |
extern int drm_debugfs_create_files(struct drm_info_list *files, int count, |
struct dentry *root, struct drm_minor *minor); |
extern int drm_debugfs_remove_files(struct drm_info_list *files, int count, |
struct drm_minor *minor); |
extern int drm_debugfs_cleanup(struct drm_minor *minor); |
#endif |
/* Info file support */ |
extern int drm_name_info(struct seq_file *m, void *data); |
extern int drm_vm_info(struct seq_file *m, void *data); |
extern int drm_queues_info(struct seq_file *m, void *data); |
extern int drm_bufs_info(struct seq_file *m, void *data); |
extern int drm_vblank_info(struct seq_file *m, void *data); |
extern int drm_clients_info(struct seq_file *m, void* data); |
extern int drm_gem_name_info(struct seq_file *m, void *data); |
extern int drm_gem_object_info(struct seq_file *m, void* data); |
#if DRM_DEBUG_CODE |
extern int drm_vma_info(struct seq_file *m, void *data); |
#endif |
/* Scatter Gather Support (drm_scatter.h) */ |
extern void drm_sg_cleanup(struct drm_sg_mem * entry); |
extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); |
extern int drm_sg_free(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
/* ATI PCIGART support (ati_pcigart.h) */ |
extern int drm_ati_pcigart_init(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, |
struct drm_ati_pcigart_info * gart_info); |
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, |
size_t align, dma_addr_t maxaddr); |
extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); |
/* sysfs support (drm_sysfs.c) */ |
struct drm_sysfs_class; |
extern struct class *drm_sysfs_create(struct module *owner, char *name); |
extern void drm_sysfs_destroy(void); |
extern int drm_sysfs_device_add(struct drm_minor *minor); |
extern void drm_sysfs_hotplug_event(struct drm_device *dev); |
extern void drm_sysfs_device_remove(struct drm_minor *minor); |
extern char *drm_get_connector_status_name(enum drm_connector_status status); |
extern int drm_sysfs_connector_add(struct drm_connector *connector); |
extern void drm_sysfs_connector_remove(struct drm_connector *connector); |
/* Graphics Execution Manager library functions (drm_gem.c) */ |
int drm_gem_init(struct drm_device *dev); |
void drm_gem_destroy(struct drm_device *dev); |
void drm_gem_object_free(struct kref *kref); |
struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, |
size_t size); |
void drm_gem_object_handle_free(struct kref *kref); |
void drm_gem_vm_open(struct vm_area_struct *vma); |
void drm_gem_vm_close(struct vm_area_struct *vma); |
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); |
/** Take an additional reference on a GEM object's main refcount. */
static inline void
drm_gem_object_reference(struct drm_gem_object *obj)
{
	kref_get(&obj->refcount);
}
/** Drop a reference on a GEM object; frees it via drm_gem_object_free() on the last put. NULL is a no-op. */
static inline void
drm_gem_object_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;

	kref_put(&obj->refcount, drm_gem_object_free);
}
int drm_gem_handle_create(struct drm_file *file_priv, |
struct drm_gem_object *obj, |
int *handlep); |
/** Take both an object reference and a handle-count reference (a userspace handle pins the object). */
static inline void
drm_gem_object_handle_reference(struct drm_gem_object *obj)
{
	drm_gem_object_reference(obj);
	kref_get(&obj->handlecount);
}
/** Drop a handle-count reference, then the paired object reference. NULL is a no-op. */
static inline void
drm_gem_object_handle_unreference(struct drm_gem_object *obj)
{
	if (obj == NULL)
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name.
	 * (Order is deliberate: handlecount put before refcount put.)
	 */
	kref_put(&obj->handlecount, drm_gem_object_handle_free);
	drm_gem_object_unreference(obj);
}
struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, |
struct drm_file *filp, |
int handle); |
int drm_gem_close_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_gem_flink_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
int drm_gem_open_ioctl(struct drm_device *dev, void *data, |
struct drm_file *file_priv); |
void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); |
void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); |
extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev); |
extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); |
extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev); |
/**
 * Linear search of dev->maplist for the map whose user_token matches
 * @token; returns the map or NULL if no entry matches.
 */
static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev,
							 unsigned int token)
{
	struct drm_map_list *_entry;
	list_for_each_entry(_entry, &dev->maplist, head)
	    if (_entry->user_token == token)
		return _entry->map;
	return NULL;
}
/**
 * Decide whether the device is on AGP. The driver hook may answer
 * definitively (any value other than 2); a return of 2 means "don't
 * know", in which case we fall back to probing the PCI AGP capability.
 */
static __inline__ int drm_device_is_agp(struct drm_device *dev)
{
	if (dev->driver->device_is_agp != NULL) {
		int err = (*dev->driver->device_is_agp) (dev);

		if (err != 2) {
			return err;
		}
	}

	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
}
/** Nonzero when the device exposes the PCI Express capability. */
static __inline__ int drm_device_is_pcie(struct drm_device *dev)
{
	return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
/** Intentionally empty: dropping a core map is a no-op in this implementation. */
static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
}
/**
 * drm_calloc_large - allocate a zeroed array, spilling to vmalloc
 * @nmemb: number of elements
 * @size: size of each element
 *
 * Requests that fit in a page come from the slab via kcalloc(); larger
 * ones use __vmalloc(). Returns NULL on multiplication overflow or
 * allocation failure. Free with drm_free_large().
 */
static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
{
	/*
	 * Reject nmemb * size overflow BEFORE any multiply: the original
	 * compared `size * nmemb <= PAGE_SIZE` first, so a wrapped product
	 * could slip through to kcalloc with a bogus small total.
	 */
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;

	if (size * nmemb <= PAGE_SIZE)
	    return kcalloc(nmemb, size, GFP_KERNEL);

	return __vmalloc(size * nmemb,
			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
}
/**
 * drm_free_large - free memory obtained from drm_calloc_large()
 * @ptr: buffer to free (NULL is safe for both kfree and vfree)
 *
 * Routes to vfree() for vmalloc'd buffers, kfree() otherwise.
 * Rewritten without `return kfree(ptr);` — returning a void expression
 * from a void function is a GNU extension, not ISO C.
 */
static __inline void drm_free_large(void *ptr)
{
	if (is_vmalloc_addr(ptr))
		vfree(ptr);
	else
		kfree(ptr);
}
/*@}*/ |
#endif |
#endif |
/drivers/video/drm/include/drm_crtc.h |
---|
0,0 → 1,743 |
/* |
* Copyright © 2006 Keith Packard |
* Copyright © 2007-2008 Dave Airlie |
* Copyright © 2007-2008 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
#ifndef __DRM_CRTC_H__ |
#define __DRM_CRTC_H__ |
#include <linux/i2c.h> |
//#include <linux/spinlock.h> |
//#include <linux/types.h> |
#include <linux/idr.h> |
//#include <linux/fb.h> |
struct drm_device; |
struct drm_mode_set; |
struct drm_framebuffer; |
#define DRM_MODE_OBJECT_CRTC 0xcccccccc |
#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 |
#define DRM_MODE_OBJECT_ENCODER 0xe0e0e0e0 |
#define DRM_MODE_OBJECT_MODE 0xdededede |
#define DRM_MODE_OBJECT_PROPERTY 0xb0b0b0b0 |
#define DRM_MODE_OBJECT_FB 0xfbfbfbfb |
#define DRM_MODE_OBJECT_BLOB 0xbbbbbbbb |
/* Base of every mode-setting object (CRTC, connector, encoder, mode, property, fb, blob). */
struct drm_mode_object {
	uint32_t id;   /* idr-assigned object id exposed to userspace — TODO confirm allocator */
	uint32_t type; /* one of the DRM_MODE_OBJECT_* magic values above */
};
/* |
* Note on terminology: here, for brevity and convenience, we refer to connector |
* control chips as 'CRTCs'. They can control any type of connector, VGA, LVDS, |
* DVI, etc. And 'screen' refers to the whole of the visible display, which |
* may span multiple monitors (and therefore multiple CRTC and connector |
* structures). |
*/ |
/* Result of mode validation; MODE_OK means usable, negative values are terminal states. */
enum drm_mode_status {
    MODE_OK	= 0,	/* Mode OK */
    MODE_HSYNC,		/* hsync out of range */
    MODE_VSYNC,		/* vsync out of range */
    MODE_H_ILLEGAL,	/* mode has illegal horizontal timings */
    MODE_V_ILLEGAL,	/* mode has illegal vertical timings */
    MODE_BAD_WIDTH,	/* requires an unsupported linepitch */
    MODE_NOMODE,	/* no mode with a matching name */
    MODE_NO_INTERLACE,	/* interlaced mode not supported */
    MODE_NO_DBLESCAN,	/* doublescan mode not supported */
    MODE_NO_VSCAN,	/* multiscan mode not supported */
    MODE_MEM,		/* insufficient video memory */
    MODE_VIRTUAL_X,	/* mode width too large for specified virtual size */
    MODE_VIRTUAL_Y,	/* mode height too large for specified virtual size */
    MODE_MEM_VIRT,	/* insufficient video memory given virtual size */
    MODE_NOCLOCK,	/* no fixed clock available */
    MODE_CLOCK_HIGH,	/* clock required is too high */
    MODE_CLOCK_LOW,	/* clock required is too low */
    MODE_CLOCK_RANGE,	/* clock/mode isn't in a ClockRange */
    MODE_BAD_HVALUE,	/* horizontal timing was out of range */
    MODE_BAD_VVALUE,	/* vertical timing was out of range */
    MODE_BAD_VSCAN,	/* VScan value out of range */
    MODE_HSYNC_NARROW,	/* horizontal sync too narrow */
    MODE_HSYNC_WIDE,	/* horizontal sync too wide */
    MODE_HBLANK_NARROW,	/* horizontal blanking too narrow */
    MODE_HBLANK_WIDE,	/* horizontal blanking too wide */
    MODE_VSYNC_NARROW,	/* vertical sync too narrow */
    MODE_VSYNC_WIDE,	/* vertical sync too wide */
    MODE_VBLANK_NARROW,	/* vertical blanking too narrow */
    MODE_VBLANK_WIDE,	/* vertical blanking too wide */
    MODE_PANEL,		/* exceeds panel dimensions */
    MODE_INTERLACE_WIDTH, /* width too large for interlaced mode */
    MODE_ONE_WIDTH,	/* only one width is supported */
    MODE_ONE_HEIGHT,	/* only one height is supported */
    MODE_ONE_SIZE,	/* only one resolution is supported */
    MODE_NO_REDUCED,	/* monitor doesn't accept reduced blanking */
    MODE_UNVERIFIED = -3, /* mode needs to be reverified */
    MODE_BAD = -2,	/* unspecified reason */
    MODE_ERROR	= -1	/* error condition */
};
#define DRM_MODE_TYPE_CLOCK_CRTC_C (DRM_MODE_TYPE_CLOCK_C | \ |
DRM_MODE_TYPE_CRTC_C) |
#define DRM_MODE(nm, t, c, hd, hss, hse, ht, hsk, vd, vss, vse, vt, vs, f) \ |
.name = nm, .status = 0, .type = (t), .clock = (c), \ |
.hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ |
.htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ |
.vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ |
.vscan = (vs), .flags = (f), .vrefresh = 0 |
#define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ |
/*
 * One display mode: the values userspace/EDID proposed plus the crtc_*
 * copies actually programmed into hardware (possibly adjusted by the
 * driver, e.g. for interlacing).
 */
struct drm_display_mode {
	/* Header */
	struct list_head head;		/* entry in a connector's or config's mode list */
	struct drm_mode_object base;	/* mode-object id/type */
	char name[DRM_DISPLAY_MODE_LEN]; /* e.g. "1024x768" — format not verified here */
	int connector_count;		/* number of connectors using this mode — TODO confirm */
	enum drm_mode_status status;	/* validation result */
	int type;			/* DRM_MODE_TYPE_* bits */

	/* Proposed mode values */
	int clock;		/* pixel clock; units (kHz) not shown here — TODO confirm */
	int hdisplay;
	int hsync_start;
	int hsync_end;
	int htotal;
	int hskew;
	int vdisplay;
	int vsync_start;
	int vsync_end;
	int vtotal;
	int vscan;
	unsigned int flags;	/* DRM_MODE_FLAG_* bits */

	/* Addressable image size (may be 0 for projectors, etc.) */
	int width_mm;
	int height_mm;

	/* Actual mode we give to hw */
	int clock_index;
	int synth_clock;
	int crtc_hdisplay;
	int crtc_hblank_start;
	int crtc_hblank_end;
	int crtc_hsync_start;
	int crtc_hsync_end;
	int crtc_htotal;
	int crtc_hskew;
	int crtc_vdisplay;
	int crtc_vblank_start;
	int crtc_vblank_end;
	int crtc_vsync_start;
	int crtc_vsync_end;
	int crtc_vtotal;
	int crtc_hadjusted;
	int crtc_vadjusted;

	/* Driver private mode info */
	int private_size;
	int *private;		/* driver-owned blob of private_size ints */
	int private_flags;

	int vrefresh;		/* vertical refresh rate */
	float hsync;		/* horizontal sync rate */
};
/* Detection result for a connector probe. */
enum drm_connector_status {
	connector_status_connected = 1,    /* sink detected */
	connector_status_disconnected = 2, /* no sink detected */
	connector_status_unknown = 3,      /* detection not possible / inconclusive */
};
/* Physical subpixel layout of the panel, as reported by the display. */
enum subpixel_order {
	SubPixelUnknown = 0,
	SubPixelHorizontalRGB,
	SubPixelHorizontalBGR,
	SubPixelVerticalRGB,
	SubPixelVerticalBGR,
	SubPixelNone,
};
/*
 * Describes a given display (e.g. CRT or flat panel) and its limitations.
 * Mostly populated from EDID — presumably by the EDID parser; verify
 * against drm_edid.c in this port.
 */
struct drm_display_info {
	char name[DRM_DISPLAY_INFO_LEN];

	/* Input info */
	bool serration_vsync;
	bool sync_on_green;
	bool composite_sync;
	bool separate_syncs;
	bool blank_to_black;
	unsigned char video_level;
	bool digital;

	/* Physical size */
	unsigned int width_mm;
	unsigned int height_mm;

	/* Display parameters */
	unsigned char gamma; /* FIXME: storage format */
	bool gtf_supported;
	bool standard_color;
	enum {
		monochrome = 0,
		rgb,
		other,
		unknown,
	} display_type;
	bool active_off_supported;
	bool suspend_supported;
	bool standby_supported;

	/* Color info FIXME: storage format */
	unsigned short redx, redy;
	unsigned short greenx, greeny;
	unsigned short bluex, bluey;
	unsigned short whitex, whitey;

	/* Clock limits FIXME: storage format */
	unsigned int min_vfreq, max_vfreq;
	unsigned int min_hfreq, max_hfreq;
	unsigned int pixel_clock;

	/* White point indices FIXME: storage format */
	unsigned int wpx1, wpy1;
	unsigned int wpgamma1;
	unsigned int wpx2, wpy2;
	unsigned int wpgamma2;

	enum subpixel_order subpixel_order;

	char *raw_edid; /* if any; ownership/lifetime not visible here — TODO confirm */
};
/* Driver callbacks for a framebuffer object. */
struct drm_framebuffer_funcs {
	/* tear down and free the framebuffer */
	void (*destroy)(struct drm_framebuffer *framebuffer);
	/* return a userspace handle for the fb's backing storage via *handle */
	int (*create_handle)(struct drm_framebuffer *fb,
			     struct drm_file *file_priv,
			     unsigned int *handle);
};
/* A scanout buffer: geometry plus the driver funcs that manage it. */
struct drm_framebuffer {
	struct drm_device *dev;		/* owning device */
	struct list_head head;		/* entry in the device's fb list */
	struct drm_mode_object base;	/* mode-object id/type */
	const struct drm_framebuffer_funcs *funcs;
	unsigned int pitch;		/* bytes per scanline */
	unsigned int width;
	unsigned int height;
	/* depth can be 15 or 16 */
	unsigned int depth;
	int bits_per_pixel;
	int flags;
	void *fbdev;			/* associated fbdev emulation info, if any — TODO confirm */
	u32 pseudo_palette[17];
	struct list_head filp_head;	/* entry in the owning file's fb list */
};
/* Variable-length opaque property payload (e.g. an EDID blob). */
struct drm_property_blob {
	struct drm_mode_object base;	/* object ID for userspace lookups */
	struct list_head head;		/* entry in mode_config's blob list */
	unsigned int length;		/* size of @data in bytes */
	void *data;
};
/* One named value of an enumerated property. */
struct drm_property_enum {
	uint64_t value;			/* numeric value for @name */
	struct list_head head;		/* entry in drm_property.enum_blob_list */
	char name[DRM_PROP_NAME_LEN];
};
/* A named, typed attribute that can be attached to mode objects. */
struct drm_property {
	struct list_head head;		/* entry in mode_config.property_list */
	struct drm_mode_object base;
	uint32_t flags;			/* property type flags */
	char name[DRM_PROP_NAME_LEN];
	uint32_t num_values;		/* number of entries in @values */
	uint64_t *values;
	struct list_head enum_blob_list; /* drm_property_enum / blob entries */
};
struct drm_crtc; |
struct drm_connector; |
struct drm_encoder; |
/**
 * drm_crtc_funcs - control CRTCs for a given device
 * @save: save CRTC state
 * @restore: restore CRTC state
 * @cursor_set: set the cursor image from a buffer handle
 * @cursor_move: move the cursor
 * @gamma_set: specify color ramp for CRTC
 * @destroy: deinit and free object.
 * @set_config: apply a complete new mode configuration
 *
 * The drm_crtc_funcs structure is the central CRTC management structure
 * in the DRM. Each CRTC controls one or more connectors (note that the name
 * CRTC is simply historical, a CRTC may control LVDS, VGA, DVI, TV out, etc.
 * connectors, not just CRTs).
 *
 * Each driver is responsible for filling out this structure at startup time,
 * in addition to providing other modesetting features, like i2c and DDC
 * bus accessors.
 */
struct drm_crtc_funcs {
	/* Save CRTC state */
	void (*save)(struct drm_crtc *crtc); /* suspend? */
	/* Restore CRTC state */
	void (*restore)(struct drm_crtc *crtc); /* resume? */
	/* cursor controls: program the cursor image from a buffer @handle
	 * of the given size, and move the cursor to (x, y) */
	int (*cursor_set)(struct drm_crtc *crtc, struct drm_file *file_priv,
			  uint32_t handle, uint32_t width, uint32_t height);
	int (*cursor_move)(struct drm_crtc *crtc, int x, int y);
	/* Set gamma on the CRTC: one ramp of @size entries per channel */
	void (*gamma_set)(struct drm_crtc *crtc, u16_t *r, u16_t *g, u16_t *b,
			  uint32_t size);
	/* Object destroy routine */
	void (*destroy)(struct drm_crtc *crtc);
	/* Apply a complete mode configuration (see struct drm_mode_set) */
	int (*set_config)(struct drm_mode_set *set);
};
/** |
* drm_crtc - central CRTC control structure |
* @enabled: is this CRTC enabled? |
* @x: x position on screen |
* @y: y position on screen |
* @desired_mode: new desired mode |
* @desired_x: desired x for desired_mode |
* @desired_y: desired y for desired_mode |
* @funcs: CRTC control functions |
* |
* Each CRTC may have one or more connectors associated with it. This structure |
* allows the CRTC to be controlled. |
*/ |
struct drm_crtc {
	struct drm_device *dev;		/* owning device */
	struct list_head head;		/* entry in mode_config.crtc_list */
	struct drm_mode_object base;	/* base object (ID/type) */
	/* framebuffer the connector is currently bound to */
	struct drm_framebuffer *fb;
	bool enabled;
	struct drm_display_mode mode;	/* current mode */
	int x, y;			/* scanout position on the fb */
	struct drm_display_mode *desired_mode;
	int desired_x, desired_y;	/* position to use with desired_mode */
	const struct drm_crtc_funcs *funcs;
	/* CRTC gamma size for reporting to userspace */
	uint32_t gamma_size;
	uint16_t *gamma_store;		/* backing store for the gamma ramps */
	/* if you are using the helper (set via drm_crtc_helper_add()) */
	void *helper_private;
};
/**
 * drm_connector_funcs - control connectors on a given device
 * @dpms: set power state (see drm_crtc_funcs above)
 * @save: save connector state
 * @restore: restore connector state
 * @detect: is this connector active?
 * @fill_modes: fill the connector's mode list, bounded by a maximum size
 * @set_property: property for this connector may need update
 * @destroy: make object go away
 *
 * Each CRTC may have one or more connectors attached to it. The functions
 * below allow the core DRM code to control connectors, enumerate available modes,
 * etc.
 */
struct drm_connector_funcs {
	/* Set the connector's power state; @mode is the DPMS level */
	void (*dpms)(struct drm_connector *connector, int mode);
	void (*save)(struct drm_connector *connector);		/* save state */
	void (*restore)(struct drm_connector *connector);	/* restore state */
	/* Probe whether a display is attached */
	enum drm_connector_status (*detect)(struct drm_connector *connector);
	/* Populate the connector's mode list, bounded by max_width/max_height */
	int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
	/* A property on this connector may need updating to @val */
	int (*set_property)(struct drm_connector *connector, struct drm_property *property,
			     uint64_t val);
	void (*destroy)(struct drm_connector *connector);	/* deinit and free */
};
/* Driver callbacks for encoder objects. */
struct drm_encoder_funcs {
	/* Tear down and free the encoder object. */
	void (*destroy)(struct drm_encoder *encoder);
};
#define DRM_CONNECTOR_MAX_UMODES 16 |
#define DRM_CONNECTOR_MAX_PROPERTY 16 |
#define DRM_CONNECTOR_LEN 32 |
#define DRM_CONNECTOR_MAX_ENCODER 2 |
/** |
* drm_encoder - central DRM encoder structure |
*/ |
/**
 * drm_encoder - central DRM encoder structure
 */
struct drm_encoder {
	struct drm_device *dev;		/* owning device */
	struct list_head head;		/* entry in mode_config.encoder_list */
	struct drm_mode_object base;
	int encoder_type;
	uint32_t possible_crtcs;	/* CRTCs this encoder can drive (presumably a bitmask) */
	uint32_t possible_clones;	/* encoders it can be cloned with (presumably a bitmask) */
	struct drm_crtc *crtc;		/* currently bound CRTC, if any */
	const struct drm_encoder_funcs *funcs;
	void *helper_private;		/* set via drm_encoder_helper_add() */
};
/** |
* drm_connector - central DRM connector control structure |
* @crtc: CRTC this connector is currently connected to, NULL if none |
* @interlace_allowed: can this connector handle interlaced modes? |
* @doublescan_allowed: can this connector handle doublescan? |
* @available_modes: modes available on this connector (from get_modes() + user) |
* @initial_x: initial x position for this connector |
* @initial_y: initial y position for this connector |
* @status: connector connected? |
* @funcs: connector control functions |
* |
* Each connector may be connected to one or more CRTCs, or may be clonable by |
* another connector if they can share a CRTC. Each connector also has a specific |
* position in the broader display (referred to as a 'screen' though it could |
* span multiple monitors). |
*/ |
struct drm_connector {
	struct drm_device *dev;			/* owning device */
//	struct device kdev;
	struct device_attribute *attr;
	struct list_head head;			/* entry in mode_config.connector_list */
	struct drm_mode_object base;
	int connector_type;
	int connector_type_id;			/* instance number within this type */
	bool interlace_allowed;
	bool doublescan_allowed;
	struct list_head modes; /* list of modes on this connector */
	int initial_x, initial_y;
	enum drm_connector_status status;	/* current connection status */
	/* these are modes added by probing with DDC or the BIOS */
	struct list_head probed_modes;
	struct drm_display_info display_info;
	const struct drm_connector_funcs *funcs;
	struct list_head user_modes;		/* modes added by userspace */
	struct drm_property_blob *edid_blob_ptr; /* raw EDID exposed as a blob property */
	/* attached properties: IDs and their current values, index-matched */
	u32_t property_ids[DRM_CONNECTOR_MAX_PROPERTY];
	uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
	/* requested DPMS state */
	int dpms;
	void *helper_private;			/* set via drm_connector_helper_add() */
	uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; /* candidate encoder object IDs */
	uint32_t force_encoder_id;
	struct drm_encoder *encoder;	/* currently active encoder */
};
/**
 * struct drm_mode_set
 *
 * Represents a single CRTC, the connectors that it drives, the mode to use,
 * and the framebuffer it scans out from.
 *
 * This is used to set modes.
 */
struct drm_mode_set {
	struct list_head head;
	struct drm_framebuffer *fb;	/* framebuffer to scan out from */
	struct drm_crtc *crtc;		/* CRTC being configured */
	struct drm_display_mode *mode;	/* mode to program */
	uint32_t x;			/* scanout offset into @fb */
	uint32_t y;
	struct drm_connector **connectors; /* connectors driven by @crtc */
	size_t num_connectors;		/* entries in @connectors */
};
/**
 * struct drm_mode_config_funcs - configure CRTCs for a given screen layout
 * @fb_create: create a new framebuffer object from a userspace fb request
 * @fb_changed: the in-kernel framebuffer configuration has changed
 *
 * These are the driver hooks the core calls for framebuffer-related
 * mode configuration operations.
 */
struct drm_mode_config_funcs {
	/* Create a driver framebuffer object from a userspace fb request */
	struct drm_framebuffer *(*fb_create)(struct drm_device *dev, struct drm_file *file_priv, struct drm_mode_fb_cmd *mode_cmd);
	/* Notify the driver that the kernel framebuffer configuration changed */
	int (*fb_changed)(struct drm_device *dev);
};
/* A grouping of mode objects exposed together to userspace. */
struct drm_mode_group {
	uint32_t num_crtcs;
	uint32_t num_encoders;
	uint32_t num_connectors;
	/* list of object IDs for this group */
	uint32_t *id_list;
};
/** |
* drm_mode_config - Mode configuration control structure |
* |
*/ |
struct drm_mode_config {
//	struct mutex mutex; /* protects configuration (mode lists etc.) */
//	struct mutex idr_mutex; /* for IDR management */
	struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
	/* this is limited to one for now */
	int num_fb;
	struct list_head fb_list;
	int num_connector;
	struct list_head connector_list;
	int num_encoder;
	struct list_head encoder_list;
	int num_crtc;
	struct list_head crtc_list;
	struct list_head property_list;
	/* in-kernel framebuffers - hung of filp_head in drm_framebuffer */
	struct list_head fb_kernel_list;
	/* mode size limits accepted by this device */
	int min_width, min_height;
	int max_width, max_height;
	struct drm_mode_config_funcs *funcs;	/* driver mode-config hooks */
	resource_size_t fb_base;		/* framebuffer resource base */
	/* pointers to standard properties */
	struct list_head property_blob_list;
	struct drm_property *edid_property;
	struct drm_property *dpms_property;
	/* DVI-I properties */
	struct drm_property *dvi_i_subconnector_property;
	struct drm_property *dvi_i_select_subconnector_property;
	/* TV properties */
	struct drm_property *tv_subconnector_property;
	struct drm_property *tv_select_subconnector_property;
	struct drm_property *tv_mode_property;
	struct drm_property *tv_left_margin_property;
	struct drm_property *tv_right_margin_property;
	struct drm_property *tv_top_margin_property;
	struct drm_property *tv_bottom_margin_property;
	/* Optional properties */
	struct drm_property *scaling_mode_property;
	struct drm_property *dithering_mode_property;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) |
#define obj_to_connector(x) container_of(x, struct drm_connector, base) |
#define obj_to_encoder(x) container_of(x, struct drm_encoder, base) |
#define obj_to_mode(x) container_of(x, struct drm_display_mode, base) |
#define obj_to_fb(x) container_of(x, struct drm_framebuffer, base) |
#define obj_to_property(x) container_of(x, struct drm_property, base) |
#define obj_to_blob(x) container_of(x, struct drm_property_blob, base) |
extern void drm_crtc_init(struct drm_device *dev, |
struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs); |
extern void drm_crtc_cleanup(struct drm_crtc *crtc); |
extern void drm_connector_init(struct drm_device *dev, |
struct drm_connector *connector, |
const struct drm_connector_funcs *funcs, |
int connector_type); |
extern void drm_connector_cleanup(struct drm_connector *connector); |
extern void drm_encoder_init(struct drm_device *dev, |
struct drm_encoder *encoder, |
const struct drm_encoder_funcs *funcs, |
int encoder_type); |
extern void drm_encoder_cleanup(struct drm_encoder *encoder); |
extern char *drm_get_connector_name(struct drm_connector *connector); |
extern char *drm_get_dpms_name(int val); |
extern char *drm_get_dvi_i_subconnector_name(int val); |
extern char *drm_get_dvi_i_select_name(int val); |
extern char *drm_get_tv_subconnector_name(int val); |
extern char *drm_get_tv_select_name(int val); |
//extern void drm_fb_release(struct drm_file *file_priv); |
//extern int drm_mode_group_init_legacy_group(struct drm_device *dev, struct drm_mode_group *group); |
//extern struct edid *drm_get_edid(struct drm_connector *connector, |
// struct i2c_adapter *adapter); |
//extern int drm_do_probe_ddc_edid(struct i2c_adapter *adapter, |
// unsigned char *buf, int len); |
//extern int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
extern void drm_mode_probed_add(struct drm_connector *connector, struct drm_display_mode *mode); |
extern void drm_mode_remove(struct drm_connector *connector, struct drm_display_mode *mode); |
extern struct drm_display_mode *drm_mode_duplicate(struct drm_device *dev, |
struct drm_display_mode *mode); |
extern void drm_mode_debug_printmodeline(struct drm_display_mode *mode); |
extern void drm_mode_config_init(struct drm_device *dev); |
extern void drm_mode_config_cleanup(struct drm_device *dev); |
extern void drm_mode_set_name(struct drm_display_mode *mode); |
extern bool drm_mode_equal(struct drm_display_mode *mode1, struct drm_display_mode *mode2); |
extern int drm_mode_width(struct drm_display_mode *mode); |
extern int drm_mode_height(struct drm_display_mode *mode); |
/* for use by the fb module */
extern int drm_mode_attachmode_crtc(struct drm_device *dev, |
struct drm_crtc *crtc, |
struct drm_display_mode *mode); |
extern int drm_mode_detachmode_crtc(struct drm_device *dev, struct drm_display_mode *mode); |
extern struct drm_display_mode *drm_mode_create(struct drm_device *dev); |
extern void drm_mode_destroy(struct drm_device *dev, struct drm_display_mode *mode); |
//extern void drm_mode_list_concat(struct list_head *head, |
// struct list_head *new); |
//extern void drm_mode_validate_size(struct drm_device *dev, |
// struct list_head *mode_list, |
// int maxX, int maxY, int maxPitch); |
//extern void drm_mode_prune_invalid(struct drm_device *dev, |
// struct list_head *mode_list, bool verbose); |
//extern void drm_mode_sort(struct list_head *mode_list); |
extern int drm_mode_vrefresh(struct drm_display_mode *mode); |
extern void drm_mode_set_crtcinfo(struct drm_display_mode *p, |
int adjust_flags); |
extern void drm_mode_connector_list_update(struct drm_connector *connector); |
//extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, |
// struct edid *edid); |
extern int drm_connector_property_set_value(struct drm_connector *connector, |
struct drm_property *property, |
uint64_t value); |
extern int drm_connector_property_get_value(struct drm_connector *connector, |
struct drm_property *property, |
uint64_t *value); |
extern struct drm_display_mode *drm_crtc_mode_create(struct drm_device *dev); |
extern void drm_framebuffer_set_object(struct drm_device *dev, |
unsigned long handle); |
extern int drm_framebuffer_init(struct drm_device *dev, |
struct drm_framebuffer *fb, |
const struct drm_framebuffer_funcs *funcs); |
extern void drm_framebuffer_cleanup(struct drm_framebuffer *fb); |
extern int drmfb_probe(struct drm_device *dev, struct drm_crtc *crtc); |
extern int drmfb_remove(struct drm_device *dev, struct drm_framebuffer *fb); |
extern void drm_crtc_probe_connector_modes(struct drm_device *dev, int maxX, int maxY); |
extern bool drm_crtc_in_use(struct drm_crtc *crtc); |
extern int drm_connector_attach_property(struct drm_connector *connector, |
struct drm_property *property, uint64_t init_val); |
extern struct drm_property *drm_property_create(struct drm_device *dev, int flags, |
const char *name, int num_values); |
extern void drm_property_destroy(struct drm_device *dev, struct drm_property *property); |
extern int drm_property_add_enum(struct drm_property *property, int index, |
uint64_t value, const char *name); |
extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); |
extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, |
char *formats[]); |
extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); |
extern int drm_mode_create_dithering_property(struct drm_device *dev); |
extern char *drm_get_encoder_name(struct drm_encoder *encoder); |
extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
extern void drm_mode_connector_detach_encoder(struct drm_connector *connector, |
struct drm_encoder *encoder); |
extern bool drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, |
int gamma_size); |
extern void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type); |
/* IOCTLs */ |
extern int drm_mode_getresources(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getcrtc(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getconnector(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_setcrtc(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_cursor_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_addfb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_rmfb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getfb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_addmode_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_rmmode_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_attachmode_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_detachmode_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getproperty_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getblob_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_connector_property_set_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_hotplug_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_replacefb(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_getencoder(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_gamma_get_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
extern int drm_mode_gamma_set_ioctl(struct drm_device *dev, |
void *data, struct drm_file *file_priv); |
//extern bool drm_detect_hdmi_monitor(struct edid *edid); |
#endif /* __DRM_CRTC_H__ */ |
/drivers/video/drm/include/linux/sched.h |
---|
File deleted |
/drivers/video/drm/include/linux/module.h |
---|
File deleted |
/drivers/video/drm/include/linux/types.h |
---|
File deleted |
/drivers/video/drm/include/linux/i2c-id.h |
---|
File deleted |
/drivers/video/drm/include/linux/list.h |
---|
File deleted |
/drivers/video/drm/include/linux/fb.h |
---|
File deleted |
/drivers/video/drm/include/linux/seq_file.h |
---|
File deleted |
/drivers/video/drm/include/drm.h |
---|
36,7 → 36,7 |
#ifndef _DRM_H_ |
#define _DRM_H_ |
#include <linux/types.h> |
#include <types.h> |
#include <errno-base.h> |
//#include <asm/ioctl.h> /* For _IO* macros */ |
741,7 → 741,4 |
typedef struct drm_set_version drm_set_version_t; |
#endif |
#define mutex_lock(x) |
#define mutex_unlock(x) |
#endif |
/drivers/video/drm/include/drm_crtc_helper.h |
---|
0,0 → 1,126 |
/* |
* Copyright © 2006 Keith Packard |
* Copyright © 2007-2008 Dave Airlie |
* Copyright © 2007-2008 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
* OTHER DEALINGS IN THE SOFTWARE. |
*/ |
/*
 * The DRM mode setting helper functions are common code for drivers to use if
 * they wish. Drivers are not forced to use this code in their
 * implementations, but it would be useful if the code they do use at least
 * provides a consistent interface and operation to userspace.
 */
#ifndef __DRM_CRTC_HELPER_H__ |
#define __DRM_CRTC_HELPER_H__ |
//#include <linux/spinlock.h> |
//#include <linux/types.h> |
//#include <linux/idr.h> |
//#include <linux/fb.h> |
/* CRTC callbacks used by the modeset helper library (helper_private slot). */
struct drm_crtc_helper_funcs {
	/*
	 * Control power levels on the CRTC. If the mode passed in is
	 * unsupported, the provider must use the next lowest power level.
	 */
	void (*dpms)(struct drm_crtc *crtc, int mode);
	void (*prepare)(struct drm_crtc *crtc);
	void (*commit)(struct drm_crtc *crtc);
	/* Provider can fixup or change mode timings before modeset occurs;
	 * returning false rejects the mode */
	bool (*mode_fixup)(struct drm_crtc *crtc,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode);
	/* Actually set the mode */
	int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
			struct drm_display_mode *adjusted_mode, int x, int y,
			struct drm_framebuffer *old_fb);
	/* Move the crtc on the current fb to the given position *optional* */
	int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
			     struct drm_framebuffer *old_fb);
};
/* Encoder callbacks used by the modeset helper library. */
struct drm_encoder_helper_funcs {
	void (*dpms)(struct drm_encoder *encoder, int mode);	/* set power level */
	void (*save)(struct drm_encoder *encoder);
	void (*restore)(struct drm_encoder *encoder);
	/* Fix up or veto the proposed mode timings */
	bool (*mode_fixup)(struct drm_encoder *encoder,
			   struct drm_display_mode *mode,
			   struct drm_display_mode *adjusted_mode);
	void (*prepare)(struct drm_encoder *encoder);
	void (*commit)(struct drm_encoder *encoder);
	/* Program the (adjusted) mode on the encoder */
	void (*mode_set)(struct drm_encoder *encoder,
			 struct drm_display_mode *mode,
			 struct drm_display_mode *adjusted_mode);
	struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
	/* detect for DAC style encoders */
	enum drm_connector_status (*detect)(struct drm_encoder *encoder,
					    struct drm_connector *connector);
};
/* Connector callbacks used by the probe/modeset helper library. */
struct drm_connector_helper_funcs {
	/* Query the display and add modes to the connector */
	int (*get_modes)(struct drm_connector *connector);
	/* Check whether @mode is usable on this connector */
	int (*mode_valid)(struct drm_connector *connector,
			  struct drm_display_mode *mode);
	/* Pick the encoder to use for this connector */
	struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
};
extern int drm_helper_probe_single_connector_modes(struct drm_connector *connector, uint32_t maxX, uint32_t maxY); |
extern void drm_helper_disable_unused_functions(struct drm_device *dev); |
extern int drm_helper_hotplug_stage_two(struct drm_device *dev); |
extern bool drm_helper_initial_config(struct drm_device *dev); |
extern int drm_crtc_helper_set_config(struct drm_mode_set *set); |
extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
int x, int y, |
struct drm_framebuffer *old_fb); |
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc); |
extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode); |
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb, |
struct drm_mode_fb_cmd *mode_cmd); |
static inline void drm_crtc_helper_add(struct drm_crtc *crtc, |
const struct drm_crtc_helper_funcs *funcs) |
{ |
crtc->helper_private = (void *)funcs; |
} |
static inline void drm_encoder_helper_add(struct drm_encoder *encoder, |
const struct drm_encoder_helper_funcs *funcs) |
{ |
encoder->helper_private = (void *)funcs; |
} |
static inline void drm_connector_helper_add(struct drm_connector *connector, |
const struct drm_connector_helper_funcs *funcs) |
{ |
connector->helper_private = (void *)funcs; |
} |
extern int drm_helper_resume_force_mode(struct drm_device *dev); |
#endif |
/drivers/video/drm/include/list.h |
---|
0,0 → 1,703 |
#ifndef _LINUX_LIST_H |
#define _LINUX_LIST_H |
//#include <linux/stddef.h> |
//#include <linux/poison.h> |
//#include <linux/prefetch.h> |
//#include <asm/system.h> |
/* |
* Simple doubly linked list implementation. |
* |
* Some of the internal functions ("__xxx") are useful when |
* manipulating whole lists rather than single entries, as |
* sometimes we already know the next/prev entries and we can |
* generate better code by using them directly rather than |
* using the generic single-entry routines. |
*/ |
/* Poison values stored in unlinked entries so that a stale dereference
 * faults at a recognizable address. */
#define LIST_POISON1 ((struct list_head*)0xFFFF0100)
#define LIST_POISON2 ((struct list_head*)0xFFFF0200)
/* Map the kernel's prefetch() onto the GCC builtin. */
#define prefetch(x) __builtin_prefetch(x)
/* Circular doubly-linked list node; embed inside the containing struct
 * and recover the owner with list_entry()/container_of(). */
struct list_head {
	struct list_head *next, *prev;
};
/* Static initializer / definition for an empty (self-linked) list head. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)
static inline void INIT_LIST_HEAD(struct list_head *list) |
{ |
list->next = list; |
list->prev = list; |
} |
/* |
* Insert a new entry between two known consecutive entries. |
* |
* This is only for internal list manipulation where we know |
* the prev/next entries already! |
*/ |
#ifndef CONFIG_DEBUG_LIST |
static inline void __list_add(struct list_head *new, |
struct list_head *prev, |
struct list_head *next) |
{ |
next->prev = new; |
new->next = next; |
new->prev = prev; |
prev->next = new; |
} |
#else |
extern void __list_add(struct list_head *new, |
struct list_head *prev, |
struct list_head *next); |
#endif |
/** |
* list_add - add a new entry |
* @new: new entry to be added |
* @head: list head to add it after |
* |
* Insert a new entry after the specified head. |
* This is good for implementing stacks. |
*/ |
static inline void list_add(struct list_head *new, struct list_head *head) |
{ |
__list_add(new, head, head->next); |
} |
/** |
* list_add_tail - add a new entry |
* @new: new entry to be added |
* @head: list head to add it before |
* |
* Insert a new entry before the specified head. |
* This is useful for implementing queues. |
*/ |
static inline void list_add_tail(struct list_head *new, struct list_head *head) |
{ |
__list_add(new, head->prev, head); |
} |
/* |
* Delete a list entry by making the prev/next entries |
* point to each other. |
* |
* This is only for internal list manipulation where we know |
* the prev/next entries already! |
*/ |
static inline void __list_del(struct list_head * prev, struct list_head * next) |
{ |
next->prev = prev; |
prev->next = next; |
} |
/** |
* list_del - deletes entry from list. |
* @entry: the element to delete from the list. |
* Note: list_empty() on entry does not return true after this, the entry is |
* in an undefined state. |
*/ |
#ifndef CONFIG_DEBUG_LIST |
static inline void list_del(struct list_head *entry) |
{ |
__list_del(entry->prev, entry->next); |
entry->next = LIST_POISON1; |
entry->prev = LIST_POISON2; |
} |
#else |
extern void list_del(struct list_head *entry); |
#endif |
/** |
* list_replace - replace old entry by new one |
* @old : the element to be replaced |
* @new : the new element to insert |
* |
* If @old was empty, it will be overwritten. |
*/ |
static inline void list_replace(struct list_head *old,
				struct list_head *new)
{
	/* NOTE: statement order is load-bearing. When @old is empty
	 * (self-linked), the second assignment rewrites old->prev, and the
	 * later reads pick up the modified values, leaving @new self-linked
	 * — that is the "overwritten" case the comment above describes. */
	new->next = old->next;
	new->next->prev = new;
	new->prev = old->prev;
	new->prev->next = new;
}
/* Replace @old with @new in its list, then reset @old to an empty list. */
static inline void list_replace_init(struct list_head *old,
					struct list_head *new)
{
	list_replace(old, new);
	INIT_LIST_HEAD(old);
}
/** |
* list_del_init - deletes entry from list and reinitialize it. |
* @entry: the element to delete from the list. |
*/ |
static inline void list_del_init(struct list_head *entry) |
{ |
__list_del(entry->prev, entry->next); |
INIT_LIST_HEAD(entry); |
} |
/** |
* list_move - delete from one list and add as another's head |
* @list: the entry to move |
* @head: the head that will precede our entry |
*/ |
static inline void list_move(struct list_head *list, struct list_head *head) |
{ |
__list_del(list->prev, list->next); |
list_add(list, head); |
} |
/** |
* list_move_tail - delete from one list and add as another's tail |
* @list: the entry to move |
* @head: the head that will follow our entry |
*/ |
static inline void list_move_tail(struct list_head *list, |
struct list_head *head) |
{ |
__list_del(list->prev, list->next); |
list_add_tail(list, head); |
} |
/** |
* list_is_last - tests whether @list is the last entry in list @head |
* @list: the entry to test |
* @head: the head of the list |
*/ |
static inline int list_is_last(const struct list_head *list, |
const struct list_head *head) |
{ |
return list->next == head; |
} |
/** |
* list_empty - tests whether a list is empty |
* @head: the list to test. |
*/ |
static inline int list_empty(const struct list_head *head) |
{ |
return head->next == head; |
} |
/** |
* list_empty_careful - tests whether a list is empty and not being modified |
* @head: the list to test |
* |
* Description: |
* tests whether a list is empty _and_ checks that no other CPU might be |
* in the process of modifying either member (next or prev) |
* |
* NOTE: using list_empty_careful() without synchronization |
* can only be safe if the only activity that can happen |
* to the list entry is list_del_init(). Eg. it cannot be used |
* if another CPU could re-list_add() it. |
*/ |
static inline int list_empty_careful(const struct list_head *head) |
{ |
struct list_head *next = head->next; |
return (next == head) && (next == head->prev); |
} |
/** |
* list_is_singular - tests whether a list has just one entry. |
* @head: the list to test. |
*/ |
static inline int list_is_singular(const struct list_head *head) |
{ |
return !list_empty(head) && (head->next == head->prev); |
} |
static inline void __list_cut_position(struct list_head *list, |
struct list_head *head, struct list_head *entry) |
{ |
struct list_head *new_first = entry->next; |
list->next = head->next; |
list->next->prev = list; |
list->prev = entry; |
entry->next = list; |
head->next = new_first; |
new_first->prev = head; |
} |
/** |
* list_cut_position - cut a list into two |
* @list: a new list to add all removed entries |
* @head: a list with entries |
* @entry: an entry within head, could be the head itself |
* and if so we won't cut the list |
* |
* This helper moves the initial part of @head, up to and |
* including @entry, from @head to @list. You should |
* pass on @entry an element you know is on @head. @list |
* should be an empty list or a list you do not care about |
* losing its data. |
* |
*/ |
static inline void list_cut_position(struct list_head *list,
		struct list_head *head, struct list_head *entry)
{
	/* nothing to cut from an empty list */
	if (list_empty(head))
		return;
	/* with a single entry, @entry must be that entry or @head itself */
	if (list_is_singular(head) &&
		(head->next != entry && head != entry))
		return;
	if (entry == head)
		/* cutting at the head moves nothing; just empty @list */
		INIT_LIST_HEAD(list);
	else
		__list_cut_position(list, head, entry);
}
static inline void __list_splice(const struct list_head *list, |
struct list_head *prev, |
struct list_head *next) |
{ |
struct list_head *first = list->next; |
struct list_head *last = list->prev; |
first->prev = prev; |
prev->next = first; |
last->next = next; |
next->prev = last; |
} |
/** |
* list_splice - join two lists, this is designed for stacks |
* @list: the new list to add. |
* @head: the place to add it in the first list. |
*/ |
static inline void list_splice(const struct list_head *list, |
struct list_head *head) |
{ |
if (!list_empty(list)) |
__list_splice(list, head, head->next); |
} |
/** |
* list_splice_tail - join two lists, each list being a queue |
* @list: the new list to add. |
* @head: the place to add it in the first list. |
*/ |
static inline void list_splice_tail(struct list_head *list, |
struct list_head *head) |
{ |
if (!list_empty(list)) |
__list_splice(list, head->prev, head); |
} |
/** |
* list_splice_init - join two lists and reinitialise the emptied list. |
* @list: the new list to add. |
* @head: the place to add it in the first list. |
* |
* The list at @list is reinitialised |
*/ |
static inline void list_splice_init(struct list_head *list, |
struct list_head *head) |
{ |
if (!list_empty(list)) { |
__list_splice(list, head, head->next); |
INIT_LIST_HEAD(list); |
} |
} |
/** |
* list_splice_tail_init - join two lists and reinitialise the emptied list |
* @list: the new list to add. |
* @head: the place to add it in the first list. |
* |
* Each of the lists is a queue. |
* The list at @list is reinitialised |
*/ |
static inline void list_splice_tail_init(struct list_head *list, |
struct list_head *head) |
{ |
if (!list_empty(list)) { |
__list_splice(list, head->prev, head); |
INIT_LIST_HEAD(list); |
} |
} |
/**
 * list_entry - get the struct for this entry
 * @ptr: the &struct list_head pointer.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Pure address arithmetic via container_of(); @ptr is never dereferenced.
 */
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)
/**
 * list_first_entry - get the first element from a list
 * @ptr: the list head to take the element from.
 * @type: the type of the struct this is embedded in.
 * @member: the name of the list_struct within the struct.
 *
 * Note, that list is expected to be not empty.
 * (On an empty list this returns the head itself reinterpreted as
 * @type -- there is no runtime check.)
 */
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
/**
 * list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * The prefetch() in the condition is only a cache hint; it does not
 * affect iteration order or termination.
 */
#define list_for_each(pos, head) \
for (pos = (head)->next; prefetch(pos->next), pos != (head); \
pos = pos->next)
/**
 * __list_for_each - iterate over a list
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 *
 * This variant differs from list_for_each() in that it's the
 * simplest possible list iteration code, no prefetching is done.
 * Use this for code that knows the list to be very short (empty
 * or 1 entry) most of the time.
 */
#define __list_for_each(pos, head) \
for (pos = (head)->next; pos != (head); pos = pos->next)
/**
 * list_for_each_prev - iterate over a list backwards
 * @pos: the &struct list_head to use as a loop cursor.
 * @head: the head for your list.
 */
#define list_for_each_prev(pos, head) \
for (pos = (head)->prev; prefetch(pos->prev), pos != (head); \
pos = pos->prev)
/**
 * list_for_each_safe - iterate over a list safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 *
 * Only removal of @pos itself is safe; the loop body must not remove @n.
 */
#define list_for_each_safe(pos, n, head) \
for (pos = (head)->next, n = pos->next; pos != (head); \
pos = n, n = pos->next)
/**
 * list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
 * @pos: the &struct list_head to use as a loop cursor.
 * @n: another &struct list_head to use as temporary storage
 * @head: the head for your list.
 */
#define list_for_each_prev_safe(pos, n, head) \
for (pos = (head)->prev, n = pos->prev; \
prefetch(pos->prev), pos != (head); \
pos = n, n = pos->prev)
/**
 * list_for_each_entry - iterate over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Uses the GCC typeof extension, like the rest of this file.
 */
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_reverse - iterate backwards over list of given type.
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 */
#define list_for_each_entry_reverse(pos, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
 * @pos: the type * to use as a start point
 * @head: the head of the list
 * @member: the name of the list_struct within the struct.
 *
 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
 * (Uses the GCC "?:"-with-omitted-middle extension: yields @pos when
 * it is non-NULL, otherwise a pseudo-entry computed from @head.)
 */
#define list_prepare_entry(pos, head, member) \
((pos) ? : list_entry(head, typeof(*pos), member))
/**
 * list_for_each_entry_continue - continue iteration over list of given type
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Continue to iterate over list of given type, continuing after
 * the current position. @pos must already be valid (e.g. from
 * list_prepare_entry()).
 */
#define list_for_each_entry_continue(pos, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_continue_reverse - iterate backwards from the given point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Start to iterate over list of given type backwards, continuing after
 * the current position.
 */
#define list_for_each_entry_continue_reverse(pos, head, member) \
for (pos = list_entry(pos->member.prev, typeof(*pos), member); \
prefetch(pos->member.prev), &pos->member != (head); \
pos = list_entry(pos->member.prev, typeof(*pos), member))
/**
 * list_for_each_entry_from - iterate over list of given type from the current point
 * @pos: the type * to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing from current position
 * (i.e. @pos itself is visited first).
 */
#define list_for_each_entry_from(pos, head, member) \
for (; prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))
/**
 * list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Only removal of @pos itself is safe; the loop body must not remove @n.
 */
#define list_for_each_entry_safe(pos, n, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_continue
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
 */
#define list_for_each_entry_safe_continue(pos, n, head, member) \
for (pos = list_entry(pos->member.next, typeof(*pos), member), \
n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_from
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate over list of given type from current point, safe against
 * removal of list entry.
 */
#define list_for_each_entry_safe_from(pos, n, head, member) \
for (n = list_entry(pos->member.next, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.next, typeof(*n), member))
/**
 * list_for_each_entry_safe_reverse
 * @pos: the type * to use as a loop cursor.
 * @n: another type * to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the list_struct within the struct.
 *
 * Iterate backwards over list of given type, safe against removal
 * of list entry.
 */
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
for (pos = list_entry((head)->prev, typeof(*pos), member), \
n = list_entry(pos->member.prev, typeof(*pos), member); \
&pos->member != (head); \
pos = n, n = list_entry(n->member.prev, typeof(*n), member))
/*
 * Double linked lists with a single pointer list head.
 * Mostly useful for hash tables where the two pointer list head is
 * too wasteful.
 * You lose the ability to access the tail in O(1).
 */
#if 0
/* NOTE(review): this entire hlist implementation is compiled out
 * (#if 0) in this port; it is kept verbatim from Linux list.h for
 * reference only. */
struct hlist_head {
struct hlist_node *first;
};
/* pprev points at the PREVIOUS node's next pointer (or at
 * head->first), which is what makes O(1) deletion possible without a
 * full back pointer. */
struct hlist_node {
struct hlist_node *next, **pprev;
};
#define HLIST_HEAD_INIT { .first = NULL }
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
h->next = NULL;
h->pprev = NULL;
}
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
static inline int hlist_empty(const struct hlist_head *h)
{
return !h->first;
}
static inline void __hlist_del(struct hlist_node *n)
{
struct hlist_node *next = n->next;
struct hlist_node **pprev = n->pprev;
*pprev = next;
if (next)
next->pprev = pprev;
}
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
/* poison the links to catch use-after-delete */
n->next = LIST_POISON1;
n->pprev = LIST_POISON2;
}
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
INIT_HLIST_NODE(n);
}
}
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
n->next = first;
if (first)
first->pprev = &n->next;
h->first = n;
n->pprev = &h->first;
}
/* next must be != NULL */
static inline void hlist_add_before(struct hlist_node *n,
struct hlist_node *next)
{
n->pprev = next->pprev;
n->next = next;
next->pprev = &n->next;
*(n->pprev) = n;
}
static inline void hlist_add_after(struct hlist_node *n,
struct hlist_node *next)
{
next->next = n->next;
n->next = next;
next->pprev = &n->next;
if(next->next)
next->next->pprev = &next->next;
}
/*
 * Move a list from one list head to another. Fixup the pprev
 * reference of the first entry if it exists.
 */
static inline void hlist_move_list(struct hlist_head *old,
struct hlist_head *new)
{
new->first = old->first;
if (new->first)
new->first->pprev = &new->first;
old->first = NULL;
}
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
#define hlist_for_each(pos, head) \
for (pos = (head)->first; pos && ({ prefetch(pos->next); 1; }); \
pos = pos->next)
#define hlist_for_each_safe(pos, n, head) \
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
pos = n)
/**
 * hlist_for_each_entry - iterate over list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(tpos, pos, head, member) \
for (pos = (head)->first; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
 * hlist_for_each_entry_continue - iterate over a hlist continuing after current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_continue(tpos, pos, member) \
for (pos = (pos)->next; \
pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
 * hlist_for_each_entry_from - iterate over a hlist continuing from current point
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_from(tpos, pos, member) \
for (; pos && ({ prefetch(pos->next); 1;}) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = pos->next)
/**
 * hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_node to use as a loop cursor.
 * @n: another &struct hlist_node to use as temporary storage
 * @head: the head for your list.
 * @member: the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry_safe(tpos, pos, n, head, member) \
for (pos = (head)->first; \
pos && ({ n = pos->next; 1; }) && \
({ tpos = hlist_entry(pos, typeof(*tpos), member); 1;}); \
pos = n)
#endif
#endif |
/drivers/video/drm/include/drm_mode.h |
---|
0,0 → 1,268 |
/* |
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie> |
* Copyright (c) 2007 Jakob Bornecrantz <wallbraker@gmail.com> |
* Copyright (c) 2008 Red Hat Inc. |
* Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA |
* Copyright (c) 2007-2008 Intel Corporation |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice shall be included in |
* all copies or substantial portions of the Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS |
* IN THE SOFTWARE. |
*/ |
#ifndef _DRM_MODE_H |
#define _DRM_MODE_H |
//#include <linux/kernel.h> |
//#include <linux/types.h> |
#define DRM_DISPLAY_INFO_LEN 32 |
#define DRM_CONNECTOR_NAME_LEN 32 |
#define DRM_DISPLAY_MODE_LEN 32 |
#define DRM_PROP_NAME_LEN 32 |
#define DRM_MODE_TYPE_BUILTIN (1<<0) |
#define DRM_MODE_TYPE_CLOCK_C ((1<<1) | DRM_MODE_TYPE_BUILTIN) |
#define DRM_MODE_TYPE_CRTC_C ((1<<2) | DRM_MODE_TYPE_BUILTIN) |
#define DRM_MODE_TYPE_PREFERRED (1<<3) |
#define DRM_MODE_TYPE_DEFAULT (1<<4) |
#define DRM_MODE_TYPE_USERDEF (1<<5) |
#define DRM_MODE_TYPE_DRIVER (1<<6) |
/* Video mode flags */ |
/* bit compatible with the xorg definitions. */ |
#define DRM_MODE_FLAG_PHSYNC (1<<0) |
#define DRM_MODE_FLAG_NHSYNC (1<<1) |
#define DRM_MODE_FLAG_PVSYNC (1<<2) |
#define DRM_MODE_FLAG_NVSYNC (1<<3) |
#define DRM_MODE_FLAG_INTERLACE (1<<4) |
#define DRM_MODE_FLAG_DBLSCAN (1<<5) |
#define DRM_MODE_FLAG_CSYNC (1<<6) |
#define DRM_MODE_FLAG_PCSYNC (1<<7) |
#define DRM_MODE_FLAG_NCSYNC (1<<8) |
#define DRM_MODE_FLAG_HSKEW (1<<9) /* hskew provided */ |
#define DRM_MODE_FLAG_BCAST (1<<10) |
#define DRM_MODE_FLAG_PIXMUX (1<<11) |
#define DRM_MODE_FLAG_DBLCLK (1<<12) |
#define DRM_MODE_FLAG_CLKDIV2 (1<<13) |
/* DPMS flags */ |
/* bit compatible with the xorg definitions. */ |
#define DRM_MODE_DPMS_ON 0 |
#define DRM_MODE_DPMS_STANDBY 1 |
#define DRM_MODE_DPMS_SUSPEND 2 |
#define DRM_MODE_DPMS_OFF 3 |
/* Scaling mode options */ |
#define DRM_MODE_SCALE_NON_GPU 0 |
#define DRM_MODE_SCALE_FULLSCREEN 1 |
#define DRM_MODE_SCALE_NO_SCALE 2 |
#define DRM_MODE_SCALE_ASPECT 3 |
/* Dithering mode options */ |
#define DRM_MODE_DITHERING_OFF 0 |
#define DRM_MODE_DITHERING_ON 1 |
/*
 * drm_mode_modeinfo - timings and identity of one display mode.
 * Layout matches the Linux DRM user-space ABI structure of the
 * same name.
 */
struct drm_mode_modeinfo {
__u32 clock; /* pixel clock; kHz in the Linux ABI -- TODO confirm this port agrees */
__u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
__u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
__u32 vrefresh; /* vertical refresh * 1000 */
__u32 flags; /* DRM_MODE_FLAG_* */
__u32 type; /* DRM_MODE_TYPE_* */
char name[DRM_DISPLAY_MODE_LEN]; /* human-readable mode name */
};
/*
 * drm_mode_card_res - enumeration of a card's mode-setting resources:
 * framebuffer/CRTC/connector/encoder id lists plus the supported
 * framebuffer size range. The *_ptr fields carry user-space pointers
 * to id arrays as 64-bit integers.
 */
struct drm_mode_card_res {
__u64 fb_id_ptr;
__u64 crtc_id_ptr;
__u64 connector_id_ptr;
__u64 encoder_id_ptr;
__u32 count_fbs;
__u32 count_crtcs;
__u32 count_connectors;
__u32 count_encoders;
__u32 min_width, max_width;
__u32 min_height, max_height;
};
/*
 * drm_mode_crtc - state of one CRTC: the framebuffer it scans out,
 * the scanout origin within that framebuffer, and (when mode_valid
 * is non-zero) the active mode timings.
 */
struct drm_mode_crtc {
__u64 set_connectors_ptr;
__u32 count_connectors;
__u32 crtc_id; /**< Id */
__u32 fb_id; /**< Id of framebuffer */
__u32 x, y; /**< Position on the framebuffer */
__u32 gamma_size;
__u32 mode_valid;
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_ENCODER_NONE 0 |
#define DRM_MODE_ENCODER_DAC 1 |
#define DRM_MODE_ENCODER_TMDS 2 |
#define DRM_MODE_ENCODER_LVDS 3 |
#define DRM_MODE_ENCODER_TVDAC 4 |
/*
 * drm_mode_get_encoder - queried state of one encoder.
 * possible_crtcs / possible_clones are bitmasks of CRTC / encoder
 * indices in the Linux ABI -- verify this port keeps that meaning.
 */
struct drm_mode_get_encoder {
__u32 encoder_id;
__u32 encoder_type; /* DRM_MODE_ENCODER_* */
__u32 crtc_id; /**< Id of crtc */
__u32 possible_crtcs;
__u32 possible_clones;
};
/* This is for connectors with multiple signal types. */ |
/* Try to match DRM_MODE_CONNECTOR_X as closely as possible. */ |
#define DRM_MODE_SUBCONNECTOR_Automatic 0 |
#define DRM_MODE_SUBCONNECTOR_Unknown 0 |
#define DRM_MODE_SUBCONNECTOR_DVID 3 |
#define DRM_MODE_SUBCONNECTOR_DVIA 4 |
#define DRM_MODE_SUBCONNECTOR_Composite 5 |
#define DRM_MODE_SUBCONNECTOR_SVIDEO 6 |
#define DRM_MODE_SUBCONNECTOR_Component 8 |
#define DRM_MODE_CONNECTOR_Unknown 0 |
#define DRM_MODE_CONNECTOR_VGA 1 |
#define DRM_MODE_CONNECTOR_DVII 2 |
#define DRM_MODE_CONNECTOR_DVID 3 |
#define DRM_MODE_CONNECTOR_DVIA 4 |
#define DRM_MODE_CONNECTOR_Composite 5 |
#define DRM_MODE_CONNECTOR_SVIDEO 6 |
#define DRM_MODE_CONNECTOR_LVDS 7 |
#define DRM_MODE_CONNECTOR_Component 8 |
#define DRM_MODE_CONNECTOR_9PinDIN 9 |
#define DRM_MODE_CONNECTOR_DisplayPort 10 |
#define DRM_MODE_CONNECTOR_HDMIA 11 |
#define DRM_MODE_CONNECTOR_HDMIB 12 |
/*
 * drm_mode_get_connector - queried state of one connector: its probed
 * modes, properties, usable encoders and the encoder currently driving
 * it. The *_ptr fields carry user-space pointers as 64-bit integers.
 */
struct drm_mode_get_connector {
__u64 encoders_ptr;
__u64 modes_ptr;
__u64 props_ptr;
__u64 prop_values_ptr;
__u32 count_modes;
__u32 count_props;
__u32 count_encoders;
__u32 encoder_id; /**< Current Encoder */
__u32 connector_id; /**< Id */
__u32 connector_type; /* DRM_MODE_CONNECTOR_* */
__u32 connector_type_id; /* index among connectors of the same type */
__u32 connection; /* connection status */
__u32 mm_width, mm_height; /**< width x height in millimeters */
__u32 subpixel;
};
#define DRM_MODE_PROP_PENDING (1<<0) |
#define DRM_MODE_PROP_RANGE (1<<1) |
#define DRM_MODE_PROP_IMMUTABLE (1<<2) |
#define DRM_MODE_PROP_ENUM (1<<3) /* enumerated type with text strings */ |
#define DRM_MODE_PROP_BLOB (1<<4) |
/* One named entry of an enumerated (DRM_MODE_PROP_ENUM) property. */
struct drm_mode_property_enum {
__u64 value;
char name[DRM_PROP_NAME_LEN];
};
/*
 * drm_mode_get_property - metadata of one property. In the Linux ABI,
 * RANGE properties put min/max in values_ptr, ENUM/BLOB properties put
 * their entries behind enum_blob_ptr -- verify this port matches.
 */
struct drm_mode_get_property {
__u64 values_ptr; /* values and blob lengths */
__u64 enum_blob_ptr; /* enum and blob id ptrs */
__u32 prop_id;
__u32 flags; /* DRM_MODE_PROP_* */
char name[DRM_PROP_NAME_LEN];
__u32 count_values;
__u32 count_enum_blobs;
};
/* Sets property @prop_id on connector @connector_id to @value. */
struct drm_mode_connector_set_property {
__u64 value;
__u32 prop_id;
__u32 connector_id;
};
/* Fetches the raw bytes of a blob property into @data (user pointer). */
struct drm_mode_get_blob {
__u32 blob_id;
__u32 length; /* blob size in bytes */
__u64 data;
};
/*
 * drm_mode_fb_cmd - framebuffer creation / query parameters.
 * pitch is the scanline stride (bytes in the Linux ABI -- confirm).
 */
struct drm_mode_fb_cmd {
__u32 fb_id;
__u32 width, height;
__u32 pitch;
__u32 bpp;
__u32 depth;
/* driver specific handle */
__u32 handle;
};
/* Attaches a user-supplied mode to a connector. */
struct drm_mode_mode_cmd {
__u32 connector_id;
struct drm_mode_modeinfo mode;
};
#define DRM_MODE_CURSOR_BO (1<<0) |
#define DRM_MODE_CURSOR_MOVE (1<<1) |
/*
 * Depending on the value in flags, different members are used.
 *
 * CURSOR_BO uses
 *    crtc
 *    width
 *    height
 *    handle - if 0 turns the cursor off
 *
 * CURSOR_MOVE uses
 *    crtc
 *    x
 *    y
 */
struct drm_mode_cursor {
__u32 flags; /* DRM_MODE_CURSOR_BO / DRM_MODE_CURSOR_MOVE */
__u32 crtc_id;
__s32 x;
__s32 y;
__u32 width;
__u32 height;
/* driver specific handle */
__u32 handle;
};
/*
 * drm_mode_crtc_lut - gamma ramp for one CRTC. red/green/blue are
 * user-space pointers to @gamma_size-element arrays.
 */
struct drm_mode_crtc_lut {
__u32 crtc_id;
__u32 gamma_size;
/* pointers to arrays */
__u64 red;
__u64 green;
__u64 blue;
};
#endif |
/drivers/video/drm/include/syscall.h |
---|
21,7 → 21,7 |
#define ERR_PARAM -1 |
u32_t drvEntry(int, char *)__asm__("_drvEntry"); |
u32_t __stdcall drvEntry(int)__asm__("_drvEntry"); |
/////////////////////////////////////////////////////////////////////////////// |
/drivers/video/drm/radeon/r100d.h |
---|
File deleted |
/drivers/video/drm/radeon/r300d.h |
---|
File deleted |
/drivers/video/drm/radeon/radeon_family.h |
---|
File deleted |
/drivers/video/drm/radeon/rs600_reg_safe.h |
---|
File deleted |
/drivers/video/drm/radeon/rv515_reg_safe.h |
---|
File deleted |
/drivers/video/drm/radeon/avivod.h |
---|
File deleted |
/drivers/video/drm/radeon/r420.c |
---|
File deleted |
/drivers/video/drm/radeon/rn50_reg_safe.h |
---|
File deleted |
/drivers/video/drm/radeon/radeon_cursor.c |
---|
File deleted |
/drivers/video/drm/radeon/display.h |
---|
File deleted |
/drivers/video/drm/radeon/rs690r.h |
---|
File deleted |
/drivers/video/drm/radeon/rv515d.h |
---|
File deleted |
/drivers/video/drm/radeon/radeon_legacy_tv.c |
---|
File deleted |
/drivers/video/drm/radeon/r200.c |
---|
File deleted |
/drivers/video/drm/radeon/r100_reg_safe.h |
---|
File deleted |
/drivers/video/drm/radeon/r200_reg_safe.h |
---|
File deleted |
/drivers/video/drm/radeon/r300_reg_safe.h |
---|
File deleted |
/drivers/video/drm/radeon/r420d.h |
---|
File deleted |
/drivers/video/drm/radeon/radeon_share.h |
---|
File deleted |
/drivers/video/drm/radeon/atombios_crtc.c |
---|
23,159 → 23,14 |
* Authors: Dave Airlie |
* Alex Deucher |
*/ |
#include <drm/drmP.h> |
#include <drm/drm_crtc_helper.h> |
#include <drm/radeon_drm.h> |
#include <drmP.h> |
#include <drm_crtc_helper.h> |
#include "radeon_drm.h" |
#include "radeon_fixed.h" |
#include "radeon.h" |
#include "atom.h" |
#include "atom-bits.h" |
/* evil but including atombios.h is much worse */ |
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, |
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing, |
int32_t *pixel_clock); |
static void atombios_overscan_setup(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = crtc->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
SET_CRTC_OVERSCAN_PS_ALLOCATION args; |
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); |
int a1, a2; |
memset(&args, 0, sizeof(args)); |
args.usOverscanRight = 0; |
args.usOverscanLeft = 0; |
args.usOverscanBottom = 0; |
args.usOverscanTop = 0; |
args.ucCRTC = radeon_crtc->crtc_id; |
switch (radeon_crtc->rmx_type) { |
case RMX_CENTER: |
args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; |
args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; |
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
break; |
case RMX_ASPECT: |
a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; |
a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; |
if (a1 > a2) { |
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; |
} else if (a2 > a1) { |
args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; |
} |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
break; |
case RMX_FULL: |
default: |
args.usOverscanRight = 0; |
args.usOverscanLeft = 0; |
args.usOverscanBottom = 0; |
args.usOverscanTop = 0; |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
break; |
} |
} |
/*
 * atombios_scaler_setup - program the hardware scaler for a CRTC via
 * the EnableScaler AtomBIOS command table: TV outputs get a standard-
 * specific multitap filter; otherwise the RMX type selects expansion,
 * centering, or disabling the scaler.
 */
static void atombios_scaler_setup(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
ENABLE_SCALER_PS_ALLOCATION args;
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler);
/* fixme - fill in enc_priv for atom dac */
enum radeon_tv_std tv_std = TV_STD_NTSC;
/* NOTE(review): is_cv is never set true in this copy, so the
 * component-video branch below is effectively dead -- confirm. */
bool is_tv = false, is_cv = false;
struct drm_encoder *encoder;
/* Only CRTC 0 reaches the scaler on pre-AVIVO chips. */
if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id)
return;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
/* find tv std */
if (encoder->crtc == crtc) {
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) {
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
tv_std = tv_dac->tv_std;
is_tv = true;
}
}
}
memset(&args, 0, sizeof(args));
args.ucScaler = radeon_crtc->crtc_id;
if (is_tv) {
/* map the DRM TV standard onto the AtomBIOS encoding */
switch (tv_std) {
case TV_STD_NTSC:
default:
args.ucTVStandard = ATOM_TV_NTSC;
break;
case TV_STD_PAL:
args.ucTVStandard = ATOM_TV_PAL;
break;
case TV_STD_PAL_M:
args.ucTVStandard = ATOM_TV_PALM;
break;
case TV_STD_PAL_60:
args.ucTVStandard = ATOM_TV_PAL60;
break;
case TV_STD_NTSC_J:
args.ucTVStandard = ATOM_TV_NTSCJ;
break;
case TV_STD_SCART_PAL:
args.ucTVStandard = ATOM_TV_PAL; /* ??? */
break;
case TV_STD_SECAM:
args.ucTVStandard = ATOM_TV_SECAM;
break;
case TV_STD_PAL_CN:
args.ucTVStandard = ATOM_TV_PALCN;
break;
}
args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
} else if (is_cv) {
args.ucTVStandard = ATOM_TV_CV;
args.ucEnable = SCALER_ENABLE_MULTITAP_MODE;
} else {
switch (radeon_crtc->rmx_type) {
case RMX_FULL:
args.ucEnable = ATOM_SCALER_EXPANSION;
break;
case RMX_CENTER:
args.ucEnable = ATOM_SCALER_CENTER;
break;
case RMX_ASPECT:
args.ucEnable = ATOM_SCALER_EXPANSION;
break;
default:
if (ASIC_IS_AVIVO(rdev))
args.ucEnable = ATOM_SCALER_DISABLE;
else
args.ucEnable = ATOM_SCALER_CENTER;
break;
}
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
/* RV515..R580 additionally need a forced TV-scaler workaround. */
if ((is_tv || is_cv)
&& rdev->family >= CHIP_RV515 && rdev->family <= CHIP_R580) {
atom_rv515_force_tv_scaler(rdev, radeon_crtc);
}
}
static void atombios_lock_crtc(struct drm_crtc *crtc, int lock) |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
348,12 → 203,6 |
if (ASIC_IS_AVIVO(rdev)) { |
uint32_t ss_cntl; |
if ((rdev->family == CHIP_RS600) || |
(rdev->family == CHIP_RS690) || |
(rdev->family == CHIP_RS740)) |
pll_flags |= (RADEON_PLL_USE_FRAC_FB_DIV | |
RADEON_PLL_PREFER_CLOSEST_LOWER); |
if (ASIC_IS_DCE32(rdev) && mode->clock > 200000) /* range limits??? */ |
pll_flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; |
else |
389,7 → 238,6 |
pll_flags |= RADEON_PLL_USE_REF_DIV; |
} |
radeon_encoder = to_radeon_encoder(encoder); |
break; |
} |
} |
473,15 → 321,13 |
struct drm_gem_object *obj; |
struct drm_radeon_gem_object *obj_priv; |
uint64_t fb_location; |
uint32_t fb_format, fb_pitch_pixels, tiling_flags; |
uint32_t fb_format, fb_pitch_pixels; |
ENTRY(); |
if (!crtc->fb) |
return -EINVAL; |
dbgprintf("x = %d y = %d width = %d height = %d\n", |
x, y, crtc->fb->width, crtc->fb->height); |
dbgprintf("hdisplay = %d\n", crtc->mode.hdisplay); |
radeon_fb = to_radeon_framebuffer(crtc->fb); |
obj = radeon_fb->obj; |
494,14 → 340,9 |
fb_location = 0; //rdev->mc.vram_location; |
dbgprintf("fb_location %x\n", fb_location); |
dbgprintf("bpp %d\n", crtc->fb->bits_per_pixel); |
dbgprintf("bpp %x\n", crtc->fb->bits_per_pixel); |
switch (crtc->fb->bits_per_pixel) { |
case 8: |
fb_format = |
AVIVO_D1GRPH_CONTROL_DEPTH_8BPP | |
AVIVO_D1GRPH_CONTROL_8BPP_INDEXED; |
break; |
case 15: |
fb_format = |
AVIVO_D1GRPH_CONTROL_DEPTH_16BPP | |
524,14 → 365,7 |
return -EINVAL; |
} |
// radeon_object_get_tiling_flags(obj->driver_private, |
// &tiling_flags, NULL); |
// if (tiling_flags & RADEON_TILING_MACRO) |
// fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE; |
// if (tiling_flags & RADEON_TILING_MICRO) |
// fb_format |= AVIVO_D1GRPH_TILED; |
/* TODO tiling */ |
if (radeon_crtc->crtc_id == 0) |
WREG32(AVIVO_D1VGA_CONTROL, 0); |
else |
569,9 → 403,10 |
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0); |
if (old_fb && old_fb != crtc->fb) { |
// radeon_fb = to_radeon_framebuffer(old_fb); |
radeon_fb = to_radeon_framebuffer(old_fb); |
// radeon_gem_object_unpin(radeon_fb->obj); |
} |
LEAVE(); |
return 0; |
} |
585,41 → 420,18 |
struct radeon_device *rdev = dev->dev_private; |
struct drm_encoder *encoder; |
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION crtc_timing; |
int need_tv_timings = 0; |
bool ret; |
ENTRY(); |
/* TODO color tiling */ |
memset(&crtc_timing, 0, sizeof(crtc_timing)); |
/* TODO tv */ |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
/* find tv std */ |
if (encoder->crtc == crtc) { |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) { |
struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv; |
if (tv_dac) { |
if (tv_dac->tv_std == TV_STD_NTSC || |
tv_dac->tv_std == TV_STD_NTSC_J || |
tv_dac->tv_std == TV_STD_PAL_M) |
need_tv_timings = 1; |
else |
need_tv_timings = 2; |
break; |
} |
} |
} |
} |
crtc_timing.ucCRTC = radeon_crtc->crtc_id; |
if (need_tv_timings) { |
ret = radeon_atom_get_tv_timings(rdev, need_tv_timings - 1, |
&crtc_timing, &adjusted_mode->clock); |
if (ret == false) |
need_tv_timings = 0; |
} |
if (!need_tv_timings) { |
crtc_timing.usH_Total = adjusted_mode->crtc_htotal; |
crtc_timing.usH_Disp = adjusted_mode->crtc_hdisplay; |
crtc_timing.usH_SyncStart = adjusted_mode->crtc_hsync_start; |
646,7 → 458,6 |
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) |
crtc_timing.susModeMiscInfo.usAccess |= ATOM_DOUBLE_CLOCK_MODE; |
} |
atombios_crtc_set_pll(crtc, adjusted_mode); |
atombios_crtc_set_timing(crtc, &crtc_timing); |
708,9 → 519,8 |
radeon_crtc_set_base(crtc, x, y, old_fb); |
radeon_legacy_atom_set_surface(crtc); |
} |
atombios_overscan_setup(crtc, mode, adjusted_mode); |
atombios_scaler_setup(crtc); |
radeon_bandwidth_update(rdev); |
LEAVE(); |
return 0; |
} |
718,8 → 528,6 |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
return false; |
return true; |
} |
752,3 → 560,150 |
AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL; |
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs); |
} |
/*
 * radeon_init_disp_bw_avivo - check display bandwidth and set up the
 * shared line buffer split for AVIVO chips.
 *
 * @mode1/@pixel_bytes1 and @mode2/@pixel_bytes2 describe the modes
 * (and bytes per pixel) active on the two display controllers; either
 * may be NULL/0 when that head is unused.
 *
 * All bandwidth math is done in 20.12 fixed point (fixed20_12).
 */
void radeon_init_disp_bw_avivo(struct drm_device *dev,
struct drm_display_mode *mode1,
uint32_t pixel_bytes1,
struct drm_display_mode *mode2,
uint32_t pixel_bytes2)
{
struct radeon_device *rdev = dev->dev_private;
fixed20_12 min_mem_eff;
fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff;
fixed20_12 sclk_ff, mclk_ff;
uint32_t dc_lb_memory_split, temp;
/* minimum memory efficiency factor used to derate raw bandwidth */
min_mem_eff.full = rfixed_const_8(0);
/* At display priority 2, raise the MC latency for whichever heads
 * are active (RV515/RS690 only have this register). */
if (rdev->disp_priority == 2) {
uint32_t mc_init_misc_lat_timer = 0;
if (rdev->family == CHIP_RV515)
mc_init_misc_lat_timer =
RREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER);
else if (rdev->family == CHIP_RS690)
mc_init_misc_lat_timer =
RREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER);
mc_init_misc_lat_timer &=
~(R300_MC_DISP1R_INIT_LAT_MASK <<
R300_MC_DISP1R_INIT_LAT_SHIFT);
mc_init_misc_lat_timer &=
~(R300_MC_DISP0R_INIT_LAT_MASK <<
R300_MC_DISP0R_INIT_LAT_SHIFT);
if (mode2)
mc_init_misc_lat_timer |=
(1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
if (mode1)
mc_init_misc_lat_timer |=
(1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
if (rdev->family == CHIP_RV515)
WREG32_MC(RV515_MC_INIT_MISC_LAT_TIMER,
mc_init_misc_lat_timer);
else if (rdev->family == CHIP_RS690)
WREG32_MC(RS690_MC_INIT_MISC_LAT_TIMER,
mc_init_misc_lat_timer);
}
/*
 * determine if there is enough bw for current mode
 */
temp_ff.full = rfixed_const(100);
mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
/* NOTE(review): sclk_ff is computed but never used below -- confirm
 * whether it was meant to feed into the bandwidth check. */
sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
/* bytes transferred per memory clock: bus width / 8, doubled for DDR */
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
temp_ff.full = rfixed_const(temp);
mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
pix_clk.full = 0;
pix_clk2.full = 0;
peak_disp_bw.full = 0;
if (mode1) {
temp_ff.full = rfixed_const(1000);
pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
pix_clk.full = rfixed_div(pix_clk, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes1);
peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
}
if (mode2) {
temp_ff.full = rfixed_const(1000);
pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
temp_ff.full = rfixed_const(pixel_bytes2);
peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
}
/* warn (but proceed) when the displays together outrun memory */
if (peak_disp_bw.full >= mem_bw.full) {
DRM_ERROR
("You may not have enough display bandwidth for current mode\n"
"If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
printk("peak disp bw %d, mem_bw %d\n",
rfixed_trunc(peak_disp_bw), rfixed_trunc(mem_bw));
}
/*
 * Line Buffer Setup
 * There is a single line buffer shared by both display controllers.
 * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between the display
 * controllers. The partitioning can either be done manually or via one of four
 * preset allocations specified in bits 1:0:
 * 0 - line buffer is divided in half and shared between each display controller
 * 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4
 * 2 - D1 gets the whole buffer
 * 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4
 * Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual allocation mode.
 * In manual allocation mode, D1 always starts at 0, D1 end/2 is specified in bits
 * 14:4; D2 allocation follows D1.
 */
/* is auto or manual better ? */
dc_lb_memory_split =
RREG32(AVIVO_DC_LB_MEMORY_SPLIT) & ~AVIVO_DC_LB_MEMORY_SPLIT_MASK;
dc_lb_memory_split &= ~AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
#if 1
/* auto */
if (mode1 && mode2) {
if (mode1->hdisplay > mode2->hdisplay) {
if (mode1->hdisplay > 2560)
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q;
else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else if (mode2->hdisplay > mode1->hdisplay) {
if (mode2->hdisplay > 2560)
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else
dc_lb_memory_split |=
AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF;
} else if (mode1) {
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_ONLY;
} else if (mode2) {
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q;
}
#else
/* manual */
dc_lb_memory_split |= AVIVO_DC_LB_MEMORY_SPLIT_SHIFT_MODE;
dc_lb_memory_split &=
~(AVIVO_DC_LB_DISP1_END_ADR_MASK <<
AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
if (mode1) {
dc_lb_memory_split |=
((((mode1->hdisplay / 2) + 64) & AVIVO_DC_LB_DISP1_END_ADR_MASK)
<< AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
} else if (mode2) {
dc_lb_memory_split |= (0 << AVIVO_DC_LB_DISP1_END_ADR_SHIFT);
}
#endif
WREG32(AVIVO_DC_LB_MEMORY_SPLIT, dc_lb_memory_split);
}
/drivers/video/drm/radeon/makefile |
---|
4,6 → 4,7 |
CFLAGS = -c -O2 -fomit-frame-pointer -fno-builtin-printf |
LDFLAGS = -nostdlib -shared -s -Map atikms.map --image-base 0 --file-alignment 512 --section-alignment 4096 |
DRM_TOPDIR = $(CURDIR)/.. |
DRM_INCLUDES = $(DRM_TOPDIR)/include |
13,16 → 14,16 |
NAME:= atikms |
INCLUDES = -I $(DRM_INCLUDES) -I $(DRM_INCLUDES)/linux -I $(DRM_INCLUDES)/drm |
INCLUDES = -I $(DRM_INCLUDES) -I $(DRM_INCLUDES)/ttm |
HFILES:= $(DRM_INCLUDES)/linux/types.h \ |
$(DRM_INCLUDES)/linux/list.h \ |
HFILES:= $(DRM_INCLUDES)/types.h \ |
$(DRM_INCLUDES)/list.h \ |
$(DRM_INCLUDES)/pci.h \ |
$(DRM_INCLUDES)/drm.h \ |
$(DRM_INCLUDES)/drm/drmP.h \ |
$(DRM_INCLUDES)/drmP.h \ |
$(DRM_INCLUDES)/drm_edid.h \ |
$(DRM_INCLUDES)/drm/drm_crtc.h \ |
$(DRM_INCLUDES)/drm/drm_mode.h \ |
$(DRM_INCLUDES)/drm_crtc.h \ |
$(DRM_INCLUDES)/drm_mode.h \ |
$(DRM_INCLUDES)/drm_mm.h \ |
atom.h \ |
radeon.h \ |
35,7 → 36,6 |
$(DRM_TOPDIR)/drm_modes.c \ |
$(DRM_TOPDIR)/drm_crtc.c \ |
$(DRM_TOPDIR)/drm_crtc_helper.c \ |
$(DRM_TOPDIR)/drm_fb_helper.c \ |
$(DRM_TOPDIR)/i2c/i2c-core.c \ |
$(DRM_TOPDIR)/i2c/i2c-algo-bit.c \ |
$(DRM_TOPDIR)/idr.c \ |
52,14 → 52,11 |
radeon_combios.c \ |
radeon_legacy_crtc.c \ |
radeon_legacy_encoders.c \ |
radeon_legacy_tv.c \ |
radeon_display.c \ |
radeon_cursor.c \ |
radeon_object.c \ |
radeon_gart.c \ |
radeon_ring.c \ |
r100.c \ |
r200.c \ |
r300.c \ |
r420.c \ |
rv515.c \ |
74,8 → 71,8 |
SRC_DEP:= |
NAME_OBJS = $(patsubst %.s, %.o, $(patsubst %.asm, %.o,\ |
$(patsubst %.c, %.o, $(NAME_SRC)))) |
NAME_OBJS = $(patsubst %.s, %.obj, $(patsubst %.asm, %.obj,\ |
$(patsubst %.c, %.obj, $(NAME_SRC)))) |
85,5 → 82,5 |
ld -L$(LIBPATH) $(LDFLAGS) -T atikms.lds -o $@ $(NAME_OBJS) vsprintf.obj icompute.obj $(LIBS) |
%.o : %.c $(HFILES) Makefile |
%.obj : %.c $(HFILES) Makefile |
$(CC) $(CFLAGS) $(DEFINES) $(INCLUDES) -o $@ -c $< |
/drivers/video/drm/radeon/r100.c |
---|
25,7 → 25,7 |
* Alex Deucher |
* Jerome Glisse |
*/ |
#include <linux/seq_file.h> |
//#include <linux/seq_file.h> |
#include "drmP.h" |
#include "drm.h" |
#include "radeon_drm.h" |
32,16 → 32,12 |
#include "radeon_microcode.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "r100d.h" |
#include "r100_reg_safe.h" |
#include "rn50_reg_safe.h" |
/* This file gathers functions specific to: |
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
* |
* Some of these functions might be used by newer ASICs. |
*/ |
int r200_init(struct radeon_device *rdev); |
void r100_hdp_reset(struct radeon_device *rdev); |
void r100_gpu_init(struct radeon_device *rdev); |
int r100_gui_wait_for_idle(struct radeon_device *rdev); |
50,7 → 46,6 |
void r100_gpu_wait_for_vsync2(struct radeon_device *rdev); |
int r100_debugfs_mc_info_init(struct radeon_device *rdev); |
/* |
* PCI GART |
*/ |
62,28 → 57,23 |
* could end up in wrong address. */ |
} |
int r100_pci_gart_init(struct radeon_device *rdev) |
int r100_pci_gart_enable(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
int r; |
if (rdev->gart.table.ram.ptr) { |
WARN(1, "R100 PCI GART already initialized.\n"); |
return 0; |
} |
/* Initialize common gart structure */ |
r = radeon_gart_init(rdev); |
if (r) |
if (r) { |
return r; |
} |
if (rdev->gart.table.ram.ptr == NULL) { |
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; |
rdev->asic->gart_set_page = &r100_pci_gart_set_page; |
return radeon_gart_table_ram_alloc(rdev); |
r = radeon_gart_table_ram_alloc(rdev); |
if (r) { |
return r; |
} |
int r100_pci_gart_enable(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
} |
/* discard memory request outside of configured range */ |
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS; |
WREG32(RADEON_AIC_CNTL, tmp); |
114,21 → 104,24 |
WREG32(RADEON_AIC_HI_ADDR, 0); |
} |
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
{ |
if (i < 0 || i > rdev->gart.num_gpu_pages) { |
return -EINVAL; |
} |
rdev->gart.table.ram.ptr[i] = cpu_to_le32(lower_32_bits(addr)); |
rdev->gart.table.ram.ptr[i] = cpu_to_le32((uint32_t)addr); |
return 0; |
} |
void r100_pci_gart_fini(struct radeon_device *rdev) |
int r100_gart_enable(struct radeon_device *rdev) |
{ |
if (rdev->flags & RADEON_IS_AGP) { |
r100_pci_gart_disable(rdev); |
radeon_gart_table_ram_free(rdev); |
radeon_gart_fini(rdev); |
return 0; |
} |
return r100_pci_gart_enable(rdev); |
} |
/* |
180,12 → 173,8 |
DRM_ERROR("Failed to register debugfs file for R100 MC !\n"); |
} |
/* Write VRAM size in case we are limiting it */ |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
/* Novell bug 204882 for RN50/M6/M7 with 8/16/32MB VRAM, |
* if the aperture is 64MB but we have 32MB VRAM |
* we report only 32MB VRAM but we have to set MC_FB_LOCATION |
* to 64MB, otherwise the gpu accidentially dies */ |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; |
tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32(RADEON_MC_FB_LOCATION, tmp); |
226,7 → 215,18 |
r100_pci_gart_disable(rdev); |
/* Setup GPU memory space */ |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
rdev->mc.gtt_location = 0xFFFFFFFFUL; |
if (rdev->flags & RADEON_IS_AGP) { |
r = radeon_agp_init(rdev); |
if (r) { |
printk(KERN_WARNING "[drm] Disabling AGP\n"); |
rdev->flags &= ~RADEON_IS_AGP; |
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
} else { |
rdev->mc.gtt_location = rdev->mc.agp_base; |
} |
} |
r = radeon_mc_setup(rdev); |
if (r) { |
return r; |
244,17 → 244,11 |
void r100_mc_fini(struct radeon_device *rdev) |
{ |
r100_pci_gart_disable(rdev); |
// radeon_gart_table_ram_free(rdev); |
// radeon_gart_fini(rdev); |
} |
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc) |
{ |
if (crtc == 0) |
return RREG32(RADEON_CRTC_CRNT_FRAME); |
else |
return RREG32(RADEON_CRTC2_CRNT_FRAME); |
} |
/* |
* Fence emission |
*/ |
303,21 → 297,14 |
return r; |
} |
} |
WREG32(R_000774_SCRATCH_ADDR, rdev->wb.gpu_addr); |
WREG32(R_00070C_CP_RB_RPTR_ADDR, |
S_00070C_RB_RPTR_ADDR((rdev->wb.gpu_addr + 1024) >> 2)); |
WREG32(R_000770_SCRATCH_UMSK, 0xff); |
WREG32(0x774, rdev->wb.gpu_addr); |
WREG32(0x70C, rdev->wb.gpu_addr + 1024); |
WREG32(0x770, 0xff); |
return 0; |
} |
void r100_wb_disable(struct radeon_device *rdev) |
{ |
WREG32(R_000770_SCRATCH_UMSK, 0); |
} |
void r100_wb_fini(struct radeon_device *rdev) |
{ |
r100_wb_disable(rdev); |
if (rdev->wb.wb_obj) { |
// radeon_object_kunmap(rdev->wb.wb_obj); |
// radeon_object_unpin(rdev->wb.wb_obj); |
327,6 → 314,7 |
} |
} |
int r100_copy_blit(struct radeon_device *rdev, |
uint64_t src_offset, |
uint64_t dst_offset, |
405,21 → 393,6 |
/* |
* CP |
*/ |
static int r100_cp_wait_for_idle(struct radeon_device *rdev) |
{ |
unsigned i; |
u32 tmp; |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32(R_000E40_RBBM_STATUS); |
if (!G_000E40_CP_CMDSTRM_BUSY(tmp)) { |
return 0; |
} |
udelay(1); |
} |
return -1; |
} |
void r100_ring_start(struct radeon_device *rdev) |
{ |
int r; |
510,12 → 483,6 |
} |
} |
static int r100_cp_init_microcode(struct radeon_device *rdev) |
{ |
return 0; |
} |
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) |
{ |
unsigned rb_bufsz; |
550,15 → 517,6 |
} else { |
DRM_INFO("radeon: cp idle (0x%08X)\n", tmp); |
} |
if (!rdev->me_fw) { |
r = r100_cp_init_microcode(rdev); |
if (r) { |
DRM_ERROR("Failed to load firmware!\n"); |
return r; |
} |
} |
/* Align ring size */ |
rb_bufsz = drm_order(ring_size / 8); |
ring_size = (1 << (rb_bufsz + 1)) * 4; |
630,13 → 588,12 |
return 0; |
} |
void r100_cp_fini(struct radeon_device *rdev) |
{ |
if (r100_cp_wait_for_idle(rdev)) { |
DRM_ERROR("Wait for CP idle timeout, shutting down CP.\n"); |
} |
/* Disable ring */ |
r100_cp_disable(rdev); |
rdev->cp.ready = false; |
WREG32(RADEON_CP_CSQ_CNTL, 0); |
radeon_ring_fini(rdev); |
DRM_INFO("radeon: cp finalized\n"); |
} |
653,6 → 610,7 |
} |
} |
int r100_cp_reset(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
659,8 → 617,9 |
bool reinit_cp; |
int i; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
reinit_cp = rdev->cp.ready; |
rdev->cp.ready = false; |
WREG32(RADEON_CP_CSQ_MODE, 0); |
688,13 → 647,6 |
return -1; |
} |
void r100_cp_commit(struct radeon_device *rdev) |
{ |
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); |
(void)RREG32(RADEON_CP_RB_WPTR); |
} |
#if 0 |
/* |
* CS functions |
773,7 → 725,7 |
unsigned idx) |
{ |
struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; |
uint32_t header; |
uint32_t header = ib_chunk->kdata[idx]; |
if (idx >= ib_chunk->length_dw) { |
DRM_ERROR("Can not parse packet at %d after CS end %d !\n", |
780,7 → 732,6 |
idx, ib_chunk->length_dw); |
return -EINVAL; |
} |
header = ib_chunk->kdata[idx]; |
pkt->idx = idx; |
pkt->type = CP_PACKET_GET_TYPE(header); |
pkt->count = CP_PACKET_GET_COUNT(header); |
808,102 → 759,6 |
} |
/** |
* r100_cs_packet_next_vline() - parse userspace VLINE packet |
* @parser: parser structure holding parsing context. |
* |
* Userspace sends a special sequence for VLINE waits. |
* PACKET0 - VLINE_START_END + value |
* PACKET0 - WAIT_UNTIL + value |
* RELOC (P3) - crtc_id in reloc. |
* |
* This function parses this and relocates the VLINE START END |
* and WAIT UNTIL packets to the correct crtc. |
* It also detects a switched off crtc and nulls out the |
* wait in that case. |
*/ |
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) |
{ |
struct radeon_cs_chunk *ib_chunk; |
struct drm_mode_object *obj; |
struct drm_crtc *crtc; |
struct radeon_crtc *radeon_crtc; |
struct radeon_cs_packet p3reloc, waitreloc; |
int crtc_id; |
int r; |
uint32_t header, h_idx, reg; |
ib_chunk = &p->chunks[p->chunk_ib_idx]; |
/* parse the wait until */ |
r = r100_cs_packet_parse(p, &waitreloc, p->idx); |
if (r) |
return r; |
/* check its a wait until and only 1 count */ |
if (waitreloc.reg != RADEON_WAIT_UNTIL || |
waitreloc.count != 0) { |
DRM_ERROR("vline wait had illegal wait until segment\n"); |
r = -EINVAL; |
return r; |
} |
if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { |
DRM_ERROR("vline wait had illegal wait until\n"); |
r = -EINVAL; |
return r; |
} |
/* jump over the NOP */ |
r = r100_cs_packet_parse(p, &p3reloc, p->idx); |
if (r) |
return r; |
h_idx = p->idx - 2; |
p->idx += waitreloc.count; |
p->idx += p3reloc.count; |
header = ib_chunk->kdata[h_idx]; |
crtc_id = ib_chunk->kdata[h_idx + 5]; |
reg = ib_chunk->kdata[h_idx] >> 2; |
mutex_lock(&p->rdev->ddev->mode_config.mutex); |
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
DRM_ERROR("cannot find crtc %d\n", crtc_id); |
r = -EINVAL; |
goto out; |
} |
crtc = obj_to_crtc(obj); |
radeon_crtc = to_radeon_crtc(crtc); |
crtc_id = radeon_crtc->crtc_id; |
if (!crtc->enabled) { |
/* if the CRTC isn't enabled - we need to nop out the wait until */ |
ib_chunk->kdata[h_idx + 2] = PACKET2(0); |
ib_chunk->kdata[h_idx + 3] = PACKET2(0); |
} else if (crtc_id == 1) { |
switch (reg) { |
case AVIVO_D1MODE_VLINE_START_END: |
header &= R300_CP_PACKET0_REG_MASK; |
header |= AVIVO_D2MODE_VLINE_START_END >> 2; |
break; |
case RADEON_CRTC_GUI_TRIG_VLINE: |
header &= R300_CP_PACKET0_REG_MASK; |
header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; |
break; |
default: |
DRM_ERROR("unknown crtc reloc\n"); |
r = -EINVAL; |
goto out; |
} |
ib_chunk->kdata[h_idx] = header; |
ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; |
} |
out: |
mutex_unlock(&p->rdev->ddev->mode_config.mutex); |
return r; |
} |
/** |
* r100_cs_packet_next_reloc() - parse next packet which should be reloc packet3 |
* @parser: parser structure holding parsing context. |
* @data: pointer to relocation data |
953,95 → 808,33 |
return 0; |
} |
static int r100_get_vtx_size(uint32_t vtx_fmt) |
{ |
int vtx_size; |
vtx_size = 2; |
/* ordered according to bits in spec */ |
if (vtx_fmt & RADEON_SE_VTX_FMT_W0) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_FPCOLOR) |
vtx_size += 3; |
if (vtx_fmt & RADEON_SE_VTX_FMT_FPALPHA) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_PKCOLOR) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_FPSPEC) |
vtx_size += 3; |
if (vtx_fmt & RADEON_SE_VTX_FMT_FPFOG) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_PKSPEC) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_ST0) |
vtx_size += 2; |
if (vtx_fmt & RADEON_SE_VTX_FMT_ST1) |
vtx_size += 2; |
if (vtx_fmt & RADEON_SE_VTX_FMT_Q1) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_ST2) |
vtx_size += 2; |
if (vtx_fmt & RADEON_SE_VTX_FMT_Q2) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_ST3) |
vtx_size += 2; |
if (vtx_fmt & RADEON_SE_VTX_FMT_Q3) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_Q0) |
vtx_size++; |
/* blend weight */ |
if (vtx_fmt & (0x7 << 15)) |
vtx_size += (vtx_fmt >> 15) & 0x7; |
if (vtx_fmt & RADEON_SE_VTX_FMT_N0) |
vtx_size += 3; |
if (vtx_fmt & RADEON_SE_VTX_FMT_XY1) |
vtx_size += 2; |
if (vtx_fmt & RADEON_SE_VTX_FMT_Z1) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_W1) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_N1) |
vtx_size++; |
if (vtx_fmt & RADEON_SE_VTX_FMT_Z) |
vtx_size++; |
return vtx_size; |
} |
static int r100_packet0_check(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt, |
unsigned idx, unsigned reg) |
struct radeon_cs_packet *pkt) |
{ |
struct radeon_cs_chunk *ib_chunk; |
struct radeon_cs_reloc *reloc; |
struct r100_cs_track *track; |
volatile uint32_t *ib; |
uint32_t tmp; |
unsigned reg; |
unsigned i; |
unsigned idx; |
bool onereg; |
int r; |
int i, face; |
u32 tile_flags = 0; |
ib = p->ib->ptr; |
ib_chunk = &p->chunks[p->chunk_ib_idx]; |
track = (struct r100_cs_track *)p->track; |
idx = pkt->idx + 1; |
reg = pkt->reg; |
onereg = false; |
if (CP_PACKET0_GET_ONE_REG_WR(ib_chunk->kdata[pkt->idx])) { |
onereg = true; |
} |
for (i = 0; i <= pkt->count; i++, idx++, reg += 4) { |
switch (reg) { |
case RADEON_CRTC_GUI_TRIG_VLINE: |
r = r100_cs_packet_parse_vline(p); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
break; |
/* FIXME: only allow PACKET3 blit? easier to check for out of |
* range access */ |
case RADEON_DST_PITCH_OFFSET: |
case RADEON_SRC_PITCH_OFFSET: |
r = r100_reloc_pitch_offset(p, pkt, idx, reg); |
if (r) |
return r; |
break; |
case RADEON_RB3D_DEPTHOFFSET: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
1049,26 → 842,39 |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->zb.robj = reloc->robj; |
track->zb.offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
tmp = ib_chunk->kdata[idx] & 0x003fffff; |
tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; |
break; |
case RADEON_RB3D_DEPTHOFFSET: |
case RADEON_RB3D_COLOROFFSET: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->cb[0].robj = reloc->robj; |
track->cb[0].offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
break; |
case R300_RB3D_COLOROFFSET0: |
case R300_ZB_DEPTHOFFSET: |
case R200_PP_TXOFFSET_0: |
case R200_PP_TXOFFSET_1: |
case R200_PP_TXOFFSET_2: |
case R200_PP_TXOFFSET_3: |
case R200_PP_TXOFFSET_4: |
case R200_PP_TXOFFSET_5: |
case RADEON_PP_TXOFFSET_0: |
case RADEON_PP_TXOFFSET_1: |
case RADEON_PP_TXOFFSET_2: |
i = (reg - RADEON_PP_TXOFFSET_0) / 24; |
case R300_TX_OFFSET_0: |
case R300_TX_OFFSET_0+4: |
case R300_TX_OFFSET_0+8: |
case R300_TX_OFFSET_0+12: |
case R300_TX_OFFSET_0+16: |
case R300_TX_OFFSET_0+20: |
case R300_TX_OFFSET_0+24: |
case R300_TX_OFFSET_0+28: |
case R300_TX_OFFSET_0+32: |
case R300_TX_OFFSET_0+36: |
case R300_TX_OFFSET_0+40: |
case R300_TX_OFFSET_0+44: |
case R300_TX_OFFSET_0+48: |
case R300_TX_OFFSET_0+52: |
case R300_TX_OFFSET_0+56: |
case R300_TX_OFFSET_0+60: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
1077,233 → 883,16 |
return r; |
} |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->textures[i].robj = reloc->robj; |
break; |
case RADEON_PP_CUBIC_OFFSET_T0_0: |
case RADEON_PP_CUBIC_OFFSET_T0_1: |
case RADEON_PP_CUBIC_OFFSET_T0_2: |
case RADEON_PP_CUBIC_OFFSET_T0_3: |
case RADEON_PP_CUBIC_OFFSET_T0_4: |
i = (reg - RADEON_PP_CUBIC_OFFSET_T0_0) / 4; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->textures[0].cube_info[i].robj = reloc->robj; |
break; |
case RADEON_PP_CUBIC_OFFSET_T1_0: |
case RADEON_PP_CUBIC_OFFSET_T1_1: |
case RADEON_PP_CUBIC_OFFSET_T1_2: |
case RADEON_PP_CUBIC_OFFSET_T1_3: |
case RADEON_PP_CUBIC_OFFSET_T1_4: |
i = (reg - RADEON_PP_CUBIC_OFFSET_T1_0) / 4; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->textures[1].cube_info[i].robj = reloc->robj; |
break; |
case RADEON_PP_CUBIC_OFFSET_T2_0: |
case RADEON_PP_CUBIC_OFFSET_T2_1: |
case RADEON_PP_CUBIC_OFFSET_T2_2: |
case RADEON_PP_CUBIC_OFFSET_T2_3: |
case RADEON_PP_CUBIC_OFFSET_T2_4: |
i = (reg - RADEON_PP_CUBIC_OFFSET_T2_0) / 4; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx]; |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->textures[2].cube_info[i].robj = reloc->robj; |
break; |
case RADEON_RE_WIDTH_HEIGHT: |
track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); |
break; |
case RADEON_RB3D_COLORPITCH: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
tile_flags |= RADEON_COLOR_TILE_ENABLE; |
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; |
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); |
tmp |= tile_flags; |
ib[idx] = tmp; |
track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; |
break; |
case RADEON_RB3D_DEPTHPITCH: |
track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; |
break; |
case RADEON_RB3D_CNTL: |
switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { |
case 7: |
case 8: |
case 9: |
case 11: |
case 12: |
track->cb[0].cpp = 1; |
break; |
case 3: |
case 4: |
case 15: |
track->cb[0].cpp = 2; |
break; |
case 6: |
track->cb[0].cpp = 4; |
break; |
default: |
DRM_ERROR("Invalid color buffer format (%d) !\n", |
((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); |
return -EINVAL; |
} |
track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); |
/* FIXME: we don't want to allow any other packets */ |
break; |
case RADEON_RB3D_ZSTENCILCNTL: |
switch (ib_chunk->kdata[idx] & 0xf) { |
case 0: |
track->zb.cpp = 2; |
break; |
case 2: |
case 3: |
case 4: |
case 5: |
case 9: |
case 11: |
track->zb.cpp = 4; |
break; |
default: |
break; |
} |
if (onereg) { |
/* FIXME: forbid onereg write to register on relocate */ |
break; |
case RADEON_RB3D_ZPASS_ADDR: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
break; |
case RADEON_PP_CNTL: |
{ |
uint32_t temp = ib_chunk->kdata[idx] >> 4; |
for (i = 0; i < track->num_texture; i++) |
track->textures[i].enabled = !!(temp & (1 << i)); |
} |
break; |
case RADEON_SE_VF_CNTL: |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
break; |
case RADEON_SE_VTX_FMT: |
track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]); |
break; |
case RADEON_PP_TEX_SIZE_0: |
case RADEON_PP_TEX_SIZE_1: |
case RADEON_PP_TEX_SIZE_2: |
i = (reg - RADEON_PP_TEX_SIZE_0) / 8; |
track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; |
track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; |
break; |
case RADEON_PP_TEX_PITCH_0: |
case RADEON_PP_TEX_PITCH_1: |
case RADEON_PP_TEX_PITCH_2: |
i = (reg - RADEON_PP_TEX_PITCH_0) / 8; |
track->textures[i].pitch = ib_chunk->kdata[idx] + 32; |
break; |
case RADEON_PP_TXFILTER_0: |
case RADEON_PP_TXFILTER_1: |
case RADEON_PP_TXFILTER_2: |
i = (reg - RADEON_PP_TXFILTER_0) / 24; |
track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK) |
>> RADEON_MAX_MIP_LEVEL_SHIFT); |
tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; |
if (tmp == 2 || tmp == 6) |
track->textures[i].roundup_w = false; |
tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; |
if (tmp == 2 || tmp == 6) |
track->textures[i].roundup_h = false; |
break; |
case RADEON_PP_TXFORMAT_0: |
case RADEON_PP_TXFORMAT_1: |
case RADEON_PP_TXFORMAT_2: |
i = (reg - RADEON_PP_TXFORMAT_0) / 24; |
if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) { |
track->textures[i].use_pitch = 1; |
} else { |
track->textures[i].use_pitch = 0; |
track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); |
track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); |
} |
if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) |
track->textures[i].tex_coord_type = 2; |
switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { |
case RADEON_TXFORMAT_I8: |
case RADEON_TXFORMAT_RGB332: |
case RADEON_TXFORMAT_Y8: |
track->textures[i].cpp = 1; |
break; |
case RADEON_TXFORMAT_AI88: |
case RADEON_TXFORMAT_ARGB1555: |
case RADEON_TXFORMAT_RGB565: |
case RADEON_TXFORMAT_ARGB4444: |
case RADEON_TXFORMAT_VYUY422: |
case RADEON_TXFORMAT_YVYU422: |
case RADEON_TXFORMAT_DXT1: |
case RADEON_TXFORMAT_SHADOW16: |
case RADEON_TXFORMAT_LDUDV655: |
case RADEON_TXFORMAT_DUDV88: |
track->textures[i].cpp = 2; |
break; |
case RADEON_TXFORMAT_ARGB8888: |
case RADEON_TXFORMAT_RGBA8888: |
case RADEON_TXFORMAT_DXT23: |
case RADEON_TXFORMAT_DXT45: |
case RADEON_TXFORMAT_SHADOW32: |
case RADEON_TXFORMAT_LDUDUV8888: |
track->textures[i].cpp = 4; |
break; |
} |
track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); |
track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); |
break; |
case RADEON_PP_CUBIC_FACES_0: |
case RADEON_PP_CUBIC_FACES_1: |
case RADEON_PP_CUBIC_FACES_2: |
tmp = ib_chunk->kdata[idx]; |
i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; |
for (face = 0; face < 4; face++) { |
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); |
track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); |
} |
break; |
default: |
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
reg, idx); |
return -EINVAL; |
} |
return 0; |
} |
1331,7 → 920,6 |
{ |
struct radeon_cs_chunk *ib_chunk; |
struct radeon_cs_reloc *reloc; |
struct r100_cs_track *track; |
unsigned idx; |
unsigned i, c; |
volatile uint32_t *ib; |
1340,11 → 928,9 |
ib = p->ib->ptr; |
ib_chunk = &p->chunks[p->chunk_ib_idx]; |
idx = pkt->idx + 1; |
track = (struct r100_cs_track *)p->track; |
switch (pkt->opcode) { |
case PACKET3_3D_LOAD_VBPNTR: |
c = ib_chunk->kdata[idx++]; |
track->num_arrays = c; |
for (i = 0; i < (c - 1); i += 2, idx += 3) { |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
1354,9 → 940,6 |
return r; |
} |
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); |
track->arrays[i + 0].robj = reloc->robj; |
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; |
track->arrays[i + 0].esize &= 0x7F; |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for packet3 %d\n", |
1365,9 → 948,6 |
return r; |
} |
ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset); |
track->arrays[i + 1].robj = reloc->robj; |
track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24; |
track->arrays[i + 1].esize &= 0x7F; |
} |
if (c & 1) { |
r = r100_cs_packet_next_reloc(p, &reloc); |
1378,9 → 958,6 |
return r; |
} |
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); |
track->arrays[i + 0].robj = reloc->robj; |
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8; |
track->arrays[i + 0].esize &= 0x7F; |
} |
break; |
case PACKET3_INDX_BUFFER: |
1397,6 → 974,7 |
} |
break; |
case 0x23: |
/* FIXME: cleanup */ |
/* 3D_RNDR_GEN_INDX_PRIM on r100/r200 */ |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
1405,71 → 983,18 |
return r; |
} |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
track->num_arrays = 1; |
track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]); |
track->arrays[0].robj = reloc->robj; |
track->arrays[0].esize = track->vtx_size; |
track->max_indx = ib_chunk->kdata[idx+1]; |
track->vap_vf_cntl = ib_chunk->kdata[idx+3]; |
track->immd_dwords = pkt->count - 1; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
case PACKET3_3D_DRAW_IMMD: |
if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { |
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
return -EINVAL; |
} |
track->vap_vf_cntl = ib_chunk->kdata[idx+1]; |
track->immd_dwords = pkt->count - 1; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing using in-packet vertex data */ |
case PACKET3_3D_DRAW_IMMD_2: |
if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { |
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); |
return -EINVAL; |
} |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
track->immd_dwords = pkt->count; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing using in-packet vertex data */ |
case PACKET3_3D_DRAW_VBUF_2: |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing of vertex buffers setup elsewhere */ |
case PACKET3_3D_DRAW_INDX_2: |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing using indices to vertex buffer */ |
case PACKET3_3D_DRAW_VBUF: |
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing of vertex buffers setup elsewhere */ |
case PACKET3_3D_DRAW_INDX: |
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; |
r = r100_cs_track_check(p->rdev, track); |
if (r) |
return r; |
break; |
/* triggers drawing using indices to vertex buffer */ |
case PACKET3_NOP: |
break; |
1483,12 → 1008,8 |
int r100_cs_parse(struct radeon_cs_parser *p) |
{ |
struct radeon_cs_packet pkt; |
struct r100_cs_track *track; |
int r; |
track = kzalloc(sizeof(*track), GFP_KERNEL); |
r100_cs_track_clear(p->rdev, track); |
p->track = track; |
do { |
r = r100_cs_packet_parse(p, &pkt, p->idx); |
if (r) { |
1497,16 → 1018,7 |
p->idx += pkt.count + 2; |
switch (pkt.type) { |
case PACKET_TYPE0: |
if (p->rdev->family >= CHIP_R200) |
r = r100_cs_parse_packet0(p, &pkt, |
p->rdev->config.r100.reg_safe_bm, |
p->rdev->config.r100.reg_safe_bm_size, |
&r200_packet0_check); |
else |
r = r100_cs_parse_packet0(p, &pkt, |
p->rdev->config.r100.reg_safe_bm, |
p->rdev->config.r100.reg_safe_bm_size, |
&r100_packet0_check); |
r = r100_packet0_check(p, &pkt); |
break; |
case PACKET_TYPE2: |
break; |
1545,6 → 1057,8 |
} |
} |
/* Wait for vertical sync on primary CRTC */ |
void r100_gpu_wait_for_vsync(struct radeon_device *rdev) |
{ |
1649,7 → 1163,7 |
{ |
uint32_t tmp; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
tmp = RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL; |
tmp |= (7 << 28); |
1666,7 → 1180,7 |
uint32_t tmp; |
int i; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
WREG32(RADEON_RBBM_SOFT_RESET, RADEON_SOFT_RESET_E2); |
(void)RREG32(RADEON_RBBM_SOFT_RESET); |
1755,117 → 1269,31 |
} |
} |
static u32 r100_get_accessible_vram(struct radeon_device *rdev) |
void r100_vram_info(struct radeon_device *rdev) |
{ |
u32 aper_size; |
u8 byte; |
r100_vram_get_type(rdev); |
aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
/* Set HDP_APER_CNTL only on cards that are known not to be broken, |
* that is has the 2nd generation multifunction PCI interface |
*/ |
if (rdev->family == CHIP_RV280 || |
rdev->family >= CHIP_RV350) { |
WREG32_P(RADEON_HOST_PATH_CNTL, RADEON_HDP_APER_CNTL, |
~RADEON_HDP_APER_CNTL); |
DRM_INFO("Generation 2 PCI interface, using max accessible memory\n"); |
return aper_size * 2; |
} |
/* Older cards have all sorts of funny issues to deal with. First |
* check if it's a multifunction card by reading the PCI config |
* header type... Limit those to one aperture size |
*/ |
// pci_read_config_byte(rdev->pdev, 0xe, &byte); |
// if (byte & 0x80) { |
// DRM_INFO("Generation 1 PCI interface in multifunction mode\n"); |
// DRM_INFO("Limiting VRAM to one aperture\n"); |
// return aper_size; |
// } |
/* Single function older card. We read HDP_APER_CNTL to see how the BIOS |
* have set it up. We don't write this as it's broken on some ASICs but |
* we expect the BIOS to have done the right thing (might be too optimistic...) |
*/ |
if (RREG32(RADEON_HOST_PATH_CNTL) & RADEON_HDP_APER_CNTL) |
return aper_size * 2; |
return aper_size; |
} |
void r100_vram_init_sizes(struct radeon_device *rdev) |
{ |
u64 config_aper_size; |
u32 accessible; |
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
if (rdev->flags & RADEON_IS_IGP) { |
uint32_t tom; |
/* read NB_TOM to get the amount of ram stolen for the GPU */ |
tom = RREG32(RADEON_NB_TOM); |
rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
/* for IGPs we need to keep VRAM where it was put by the BIOS */ |
rdev->mc.vram_location = (tom & 0xffff) << 16; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
} else { |
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
/* Some production boards of m6 will report 0 |
* if it's 8 MB |
*/ |
if (rdev->mc.real_vram_size == 0) { |
rdev->mc.real_vram_size = 8192 * 1024; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
if (rdev->mc.vram_size == 0) { |
rdev->mc.vram_size = 8192 * 1024; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
} |
/* let driver place VRAM */ |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
/* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - |
* Novell bug 204882 + along with lots of ubuntu ones */ |
if (config_aper_size > rdev->mc.real_vram_size) |
rdev->mc.mc_vram_size = config_aper_size; |
else |
rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
} |
/* work out accessible VRAM */ |
accessible = r100_get_accessible_vram(rdev); |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
if (accessible > rdev->mc.aper_size) |
accessible = rdev->mc.aper_size; |
if (rdev->mc.mc_vram_size > rdev->mc.aper_size) |
rdev->mc.mc_vram_size = rdev->mc.aper_size; |
if (rdev->mc.real_vram_size > rdev->mc.aper_size) |
rdev->mc.real_vram_size = rdev->mc.aper_size; |
} |
/*
 * Toggle legacy VGA access in CONFIG_CNTL.
 *
 * state == true:  clear bit 9.
 * state == false: clear bit 8 and set bit 9.
 * Other CONFIG_CNTL bits are preserved (read-modify-write).
 */
void r100_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t cfg = RREG32(RADEON_CONFIG_CNTL);

	if (state) {
		cfg &= ~(1 << 9);
	} else {
		cfg &= ~(1 << 8);
		cfg |= (1 << 9);
	}
	WREG32(RADEON_CONFIG_CNTL, cfg);
}
/*
 * Gather VRAM information for r100-class chips: determine the memory
 * type first, then compute the size/aperture fields (the helpers fill
 * rdev->mc; the order matters since sizing reads fields set by the
 * type probe).
 */
void r100_vram_info(struct radeon_device *rdev)
{
	r100_vram_get_type(rdev);
	r100_vram_init_sizes(rdev);
}
/* |
* Indirect registers accessor |
*/ |
1922,17 → 1350,28 |
r100_pll_errata_after_data(rdev); |
} |
/*
 * Read a 32-bit register.  Registers below 0x10000 are directly mapped
 * in the MMIO BAR; higher offsets go through the MM_INDEX/MM_DATA
 * indirect pair.
 */
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
	void __iomem *mmio = (void __iomem *)rdev->rmmio;

	if (reg >= 0x10000) {
		/* indirect access: latch the offset, then read the data port */
		writel(reg, mmio + RADEON_MM_INDEX);
		return readl(mmio + RADEON_MM_DATA);
	}
	return readl(mmio + reg);
}
/*
 * Write a 32-bit register.  Mirrors r100_mm_rreg(): direct MMIO below
 * 0x10000, MM_INDEX/MM_DATA indirection above.
 */
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	void __iomem *mmio = (void __iomem *)rdev->rmmio;

	if (reg >= 0x10000) {
		/* indirect access: latch the offset, then write the data port */
		writel(reg, mmio + RADEON_MM_INDEX);
		writel(v, mmio + RADEON_MM_DATA);
	} else {
		writel(v, mmio + reg);
	}
}
int r100_init(struct radeon_device *rdev) |
{ |
if (ASIC_IS_RN50(rdev)) { |
rdev->config.r100.reg_safe_bm = rn50_reg_safe_bm; |
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(rn50_reg_safe_bm); |
} else if (rdev->family < CHIP_R200) { |
rdev->config.r100.reg_safe_bm = r100_reg_safe_bm; |
rdev->config.r100.reg_safe_bm_size = ARRAY_SIZE(r100_reg_safe_bm); |
} else { |
return r200_init(rdev); |
} |
return 0; |
} |
2106,611 → 1545,3 |
return 0; |
#endif |
} |
/*
 * Program surface register @reg to cover the VRAM range
 * [@offset, @offset + @obj_size - 1] with the tiling and byte-swap
 * behaviour requested in @tiling_flags.  The flag encoding (and the
 * pitch divisor) differs per ASIC generation.  Always returns 0.
 */
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	int surf_index = reg * 16;	/* 16 bytes of registers per surface */
	int flags;

	/* r100/r200 divide by 16, r300+ by 8 */
	if (rdev->family < CHIP_R300)
		flags = pitch / 16;
	else
		flags = pitch / 8;

	if (rdev->family <= CHIP_RS200) {
		const uint32_t both = RADEON_TILING_MACRO | RADEON_TILING_MICRO;

		if ((tiling_flags & both) == both)
			flags |= RADEON_SURF_TILE_COLOR_BOTH;
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= RADEON_SURF_TILE_COLOR_MACRO;
	} else if (rdev->family <= CHIP_RV280) {
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= R200_SURF_TILE_COLOR_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R200_SURF_TILE_COLOR_MICRO;
	} else {
		if (tiling_flags & RADEON_TILING_MACRO)
			flags |= R300_SURF_TILE_MACRO;
		if (tiling_flags & RADEON_TILING_MICRO)
			flags |= R300_SURF_TILE_MICRO;
	}

	/* byte swapping applies to both apertures */
	if (tiling_flags & RADEON_TILING_SWAP_16BIT)
		flags |= RADEON_SURF_AP0_SWP_16BPP | RADEON_SURF_AP1_SWP_16BPP;
	if (tiling_flags & RADEON_TILING_SWAP_32BIT)
		flags |= RADEON_SURF_AP0_SWP_32BPP | RADEON_SURF_AP1_SWP_32BPP;

	DRM_DEBUG("writing surface %d %d %x %x\n", reg, flags, offset, offset+obj_size-1);
	WREG32(RADEON_SURFACE0_INFO + surf_index, flags);
	WREG32(RADEON_SURFACE0_LOWER_BOUND + surf_index, offset);
	WREG32(RADEON_SURFACE0_UPPER_BOUND + surf_index, offset + obj_size - 1);
	return 0;
}
void r100_clear_surface_reg(struct radeon_device *rdev, int reg) |
{ |
int surf_index = reg * 16; |
WREG32(RADEON_SURFACE0_INFO + surf_index, 0); |
} |
/*
 * r100_bandwidth_update() - compute and program display watermarks.
 *
 * Using 20.12 fixed-point math (fixed20_12 / rfixed_* helpers), this
 * derives the available memory bandwidth and the worst-case latency of
 * display requests from the memory timings (tRCD/tRP/tRAS/tCAS), then
 * programs GRPH_STOP_REQ / GRPH_START_REQ and the critical point into
 * GRPH_BUFFER_CNTL (CRTC1) and GRPH2_BUFFER_CNTL (CRTC2).  The formulas
 * follow the legacy DDX/BIOS watermark code.
 */
void r100_bandwidth_update(struct radeon_device *rdev)
{
	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
	/* CAS-latency lookup tables, indexed by the field decoded from
	 * MEM_SDRAM_MODE_REG (entries are clocks in 20.12 fixed point). */
	fixed20_12 memtcas_ff[8] = {
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init(0),
		/* NOTE(review): 7 initializers for 8 slots — last entry is
		 * zero-initialized; matches the upstream table. */
	};
	fixed20_12 memtcas_rs480_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init_half(3),
	};
	fixed20_12 memtcas2_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
	};
	/* read-burst (Trbs) tables — folded into tCAS on R300-class parts */
	fixed20_12 memtrbs[8] = {
		fixed_init(1),
		fixed_init_half(1),
		fixed_init(2),
		fixed_init_half(2),
		fixed_init(3),
		fixed_init_half(3),
		fixed_init(4),
		fixed_init_half(4)
	};
	fixed20_12 memtrbs_r4xx[8] = {
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
		fixed_init(8),
		fixed_init(9),
		fixed_init(10),
		fixed_init(11)
	};
	fixed20_12 min_mem_eff;
	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
	fixed20_12 cur_latency_mclk, cur_latency_sclk;
	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
		disp_drain_rate2, read_return_rate;
	fixed20_12 time_disp1_drop_priority;
	int c;
	int cur_size = 16; /* in octawords */
	int critical_point = 0, critical_point2;
	/* uint32_t read_return_rate, time_disp1_drop_priority; */
	int stop_req, max_stop_req;
	struct drm_display_mode *mode1 = NULL;
	struct drm_display_mode *mode2 = NULL;
	uint32_t pixel_bytes1 = 0;
	uint32_t pixel_bytes2 = 0;
	/* pick up the active mode and bytes-per-pixel for each CRTC;
	 * pixel_bytesN stays 0 when the CRTC is disabled */
	if (rdev->mode_info.crtcs[0]->base.enabled) {
		mode1 = &rdev->mode_info.crtcs[0]->base.mode;
		pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
	}
	if (rdev->mode_info.crtcs[1]->base.enabled) {
		mode2 = &rdev->mode_info.crtcs[1]->base.mode;
		pixel_bytes2 = rdev->mode_info.crtcs[1]->base.fb->bits_per_pixel / 8;
	}
	min_mem_eff.full = rfixed_const_8(0);
	/* get modes */
	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
		/* check crtc enables */
		if (mode2)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
	}
	/*
	 * determine is there is enough bw for current mode
	 */
	/* clocks are stored in 10 kHz units; divide by 100 to get MHz */
	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
	temp_ff.full = rfixed_const(100);
	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
	/* raw memory bandwidth: bus width in bytes, doubled for DDR */
	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = rfixed_const(temp);
	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);
	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = rfixed_const(1000);
		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
		pix_clk.full = rfixed_div(pix_clk, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes1);
		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = rfixed_const(1000);
		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes2);
		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
	}
	/* derate the theoretical bandwidth by the minimum efficiency */
	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
	}
	/* Get values from the EXT_MEM_CNTL register...converting its contents. */
	temp = RREG32(RADEON_MEM_TIMING_CNTL);
	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
		mem_trcd = ((temp >> 2) & 0x3) + 1;
		mem_trp = ((temp & 0x3)) + 1;
		mem_tras = ((temp & 0x70) >> 4) + 1;
	} else if (rdev->family == CHIP_R300 ||
		   rdev->family == CHIP_R350) { /* r300, r350 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 11) & 0xf) + 4;
	} else if (rdev->family == CHIP_RV350 ||
		   rdev->family <= CHIP_RV380) {
		/* rv3x0 */
		mem_trcd = (temp & 0x7) + 3;
		mem_trp = ((temp >> 8) & 0x7) + 3;
		mem_tras = ((temp >> 11) & 0xf) + 6;
	} else if (rdev->family == CHIP_R420 ||
		   rdev->family == CHIP_R423 ||
		   rdev->family == CHIP_RV410) {
		/* r4xx */
		mem_trcd = (temp & 0xf) + 3;
		if (mem_trcd > 15)
			mem_trcd = 15;
		mem_trp = ((temp >> 8) & 0xf) + 3;
		if (mem_trp > 15)
			mem_trp = 15;
		mem_tras = ((temp >> 12) & 0x1f) + 6;
		if (mem_tras > 31)
			mem_tras = 31;
	} else { /* RV200, R200 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 12) & 0xf) + 4;
	}
	/* convert to FF */
	trcd_ff.full = rfixed_const(mem_trcd);
	trp_ff.full = rfixed_const(mem_trp);
	tras_ff.full = rfixed_const(mem_tras);
	/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
	data = (temp & (7 << 20)) >> 20;
	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_RS480) /* don't think rs400 */
			tcas_ff = memtcas_rs480_ff[data];
		else
			tcas_ff = memtcas_ff[data];
	} else
		tcas_ff = memtcas2_ff[data];
	if (rdev->family == CHIP_RS400 ||
	    rdev->family == CHIP_RS480) {
		/* extra cas latency stored in bits 23-25 0-4 clocks */
		data = (temp >> 23) & 0x7;
		if (data < 5)
			tcas_ff.full += rfixed_const(data);
	}
	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
		/* on the R300, Tcas is included in Trbs.
		 */
		temp = RREG32(RADEON_MEM_CNTL);
		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
		if (data == 1) {
			if (R300_MEM_USE_CD_CH_ONLY & temp) {
				temp = RREG32(R300_MC_IND_INDEX);
				temp &= ~R300_MC_IND_ADDR_MASK;
				temp |= R300_MC_READ_CNTL_CD_mcind;
				WREG32(R300_MC_IND_INDEX, temp);
				temp = RREG32(R300_MC_IND_DATA);
				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
			} else {
				temp = RREG32(R300_MC_READ_CNTL_AB);
				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
			}
		} else {
			temp = RREG32(R300_MC_READ_CNTL_AB);
			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
		}
		if (rdev->family == CHIP_RV410 ||
		    rdev->family == CHIP_R420 ||
		    rdev->family == CHIP_R423)
			trbs_ff = memtrbs_r4xx[data];
		else
			trbs_ff = memtrbs[data];
		tcas_ff.full += trbs_ff.full;
	}
	sclk_eff_ff.full = sclk_ff.full;
	if (rdev->flags & RADEON_IS_AGP) {
		fixed20_12 agpmode_ff;
		agpmode_ff.full = rfixed_const(radeon_agpmode);
		temp_ff.full = rfixed_const_666(16);
		sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
	}
	/* TODO PCIE lanes may affect this - agpmode == 16?? */
	if (ASIC_IS_R300(rdev)) {
		sclk_delay_ff.full = rfixed_const(250);
	} else {
		if ((rdev->family == CHIP_RV100) ||
		    rdev->flags & RADEON_IS_IGP) {
			if (rdev->mc.vram_is_ddr)
				sclk_delay_ff.full = rfixed_const(41);
			else
				sclk_delay_ff.full = rfixed_const(33);
		} else {
			if (rdev->mc.vram_width == 128)
				sclk_delay_ff.full = rfixed_const(57);
			else
				sclk_delay_ff.full = rfixed_const(41);
		}
	}
	mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);
	/* k1/c: memory-controller latency constants by memory type/width */
	if (rdev->mc.vram_is_ddr) {
		if (rdev->mc.vram_width == 32) {
			k1.full = rfixed_const(40);
			c = 3;
		} else {
			k1.full = rfixed_const(20);
			c = 1;
		}
	} else {
		k1.full = rfixed_const(40);
		c = 3;
	}
	/* mc_latency_mclk = (2*tRCD + c*tCAS + 4*(tRAS + tRP) + k1) / mclk */
	temp_ff.full = rfixed_const(2);
	mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
	temp_ff.full = rfixed_const(c);
	mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
	temp_ff.full = rfixed_const(4);
	mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
	mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
	mc_latency_mclk.full += k1.full;
	mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
	mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);
	/*
	  HW cursor time assuming worst case of full size colour cursor.
	*/
	temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
	temp_ff.full += trcd_ff.full;
	if (temp_ff.full < tras_ff.full)
		temp_ff.full = tras_ff.full;
	cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);
	temp_ff.full = rfixed_const(cur_size);
	cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
	/*
	  Find the total latency for the display data.
	*/
	disp_latency_overhead.full = rfixed_const(80);
	disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;
	/* worst-case of the mclk- and sclk-side latencies */
	if (mc_latency_mclk.full > mc_latency_sclk.full)
		disp_latency.full = mc_latency_mclk.full;
	else
		disp_latency.full = mc_latency_sclk.full;
	/* setup Max GRPH_STOP_REQ default value */
	if (ASIC_IS_RV100(rdev))
		max_stop_req = 0x5c;
	else
		max_stop_req = 0x7c;
	if (mode1) {
		/* CRTC1
		   Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
		   GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
		*/
		stop_req = mode1->hdisplay * pixel_bytes1 / 16;
		if (stop_req > max_stop_req)
			stop_req = max_stop_req;
		/*
		  Find the drain rate of the display buffer.
		*/
		/* pixel_bytes1 is nonzero here: it is set whenever mode1 is */
		temp_ff.full = rfixed_const((16/pixel_bytes1));
		disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);
		/*
		  Find the critical point of the display buffer.
		*/
		crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
		crit_point_ff.full += rfixed_const_half(0);
		critical_point = rfixed_trunc(crit_point_ff);
		if (rdev->disp_priority == 2) {
			critical_point = 0;
		}
		/*
		  The critical point should never be above max_stop_req-4.  Setting
		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
		*/
		if (max_stop_req - critical_point < 4)
			critical_point = 0;
		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
			critical_point = 0x10;
		}
		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		temp &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		temp |= RADEON_GRPH_BUFFER_SIZE;
		temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);
		/*
		  Write the result into the register.
		*/
		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
#if 0
		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
			/* attempt to program RS400 disp regs correctly ??? */
			temp = RREG32(RS400_DISP1_REG_CNTL);
			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DMIF_MEM_CNTL1);
			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
		}
#endif
		DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
			  /* (unsigned int)info->SavedReg->grph_buffer_cntl, */
			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
	}
	if (mode2) {
		u32 grph2_cntl;
		stop_req = mode2->hdisplay * pixel_bytes2 / 16;
		if (stop_req > max_stop_req)
			stop_req = max_stop_req;
		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = rfixed_const((16/pixel_bytes2));
		disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);
		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
				RADEON_GRPH_CRITICAL_AT_SOF |
				RADEON_GRPH_STOP_CNTL);
		if ((rdev->family == CHIP_RS100) ||
		    (rdev->family == CHIP_RS200))
			critical_point2 = 0;
		else {
			/* account for time CRTC1 holds priority over CRTC2 */
			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
			temp_ff.full = rfixed_const(temp);
			temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
			if (sclk_ff.full < temp_ff.full)
				temp_ff.full = sclk_ff.full;
			read_return_rate.full = temp_ff.full;
			if (mode1) {
				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
				time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
			} else {
				time_disp1_drop_priority.full = 0;
			}
			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
			crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
			crit_point_ff.full += rfixed_const_half(0);
			critical_point2 = rfixed_trunc(crit_point_ff);
			if (rdev->disp_priority == 2) {
				critical_point2 = 0;
			}
			if (max_stop_req - critical_point2 < 4)
				critical_point2 = 0;
		}
		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
			/* some R300 cards have problem with this set to 0 */
			critical_point2 = 0x10;
		}
		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));
		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
#if 0
			/* attempt to program RS400 disp2 regs correctly ??? */
			temp = RREG32(RS400_DISP2_REQ_CNTL1);
			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DISP2_REQ_CNTL2);
			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
#endif
			/* hard-coded RS400/RS480 request levels */
			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
			WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
		}
		DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
	}
}
/*
 * Quiesce memory-controller clients before reprogramming the MC:
 * stop the CP, save the CRTC/cursor/VGA state into @save, then disable
 * VGA access, cursors, overlay and CRTC display requests.
 * r100_mc_resume() restores the saved state.
 */
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Shutdown CP we shouldn't need to do that but better be safe than
	 * sorry
	 */
	rdev->cp.ready = false;
	WREG32(R_000740_CP_CSQ_CNTL, 0);
	/* Save few CRTC registers */
	save->GENMO_WT = RREG32(R_0003C0_GENMO_WT);
	save->CRTC_EXT_CNTL = RREG32(R_000054_CRTC_EXT_CNTL);
	save->CRTC_GEN_CNTL = RREG32(R_000050_CRTC_GEN_CNTL);
	save->CUR_OFFSET = RREG32(R_000260_CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		/* second CRTC state only exists on dual-CRTC parts */
		save->CRTC2_GEN_CNTL = RREG32(R_0003F8_CRTC2_GEN_CNTL);
		save->CUR2_OFFSET = RREG32(R_000360_CUR2_OFFSET);
	}
	/* Disable VGA aperture access */
	WREG32(R_0003C0_GENMO_WT, C_0003C0_VGA_RAM_EN & save->GENMO_WT);
	/* Disable cursor, overlay, crtc */
	WREG32(R_000260_CUR_OFFSET, save->CUR_OFFSET | S_000260_CUR_LOCK(1));
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL |
					S_000054_CRTC_DISPLAY_DIS(1));
	WREG32(R_000050_CRTC_GEN_CNTL,
			(C_000050_CRTC_CUR_EN & save->CRTC_GEN_CNTL) |
			S_000050_CRTC_DISP_REQ_EN_B(1));
	WREG32(R_000420_OV0_SCALE_CNTL,
		C_000420_OV0_OVERLAY_EN & RREG32(R_000420_OV0_SCALE_CNTL));
	/* unlock cursor 1 again after the masked update above */
	WREG32(R_000260_CUR_OFFSET, C_000260_CUR_LOCK & save->CUR_OFFSET);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_000360_CUR2_OFFSET, save->CUR2_OFFSET |
						S_000360_CUR2_LOCK(1));
		WREG32(R_0003F8_CRTC2_GEN_CNTL,
			(C_0003F8_CRTC2_CUR_EN & save->CRTC2_GEN_CNTL) |
			S_0003F8_CRTC2_DISPLAY_DIS(1) |
			S_0003F8_CRTC2_DISP_REQ_EN_B(1));
		WREG32(R_000360_CUR2_OFFSET,
			C_000360_CUR2_LOCK & save->CUR2_OFFSET);
	}
}
/*
 * Undo r100_mc_stop(): point the CRTC scanout base(s) at the (possibly
 * relocated) VRAM location, then restore the CRTC/VGA registers saved
 * in @save.
 */
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
	/* Update base address for crtc */
	WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
				rdev->mc.vram_location);
	}
	/* Restore CRTC registers */
	WREG32(R_0003C0_GENMO_WT, save->GENMO_WT);
	WREG32(R_000054_CRTC_EXT_CNTL, save->CRTC_EXT_CNTL);
	WREG32(R_000050_CRTC_GEN_CNTL, save->CRTC_GEN_CNTL);
	if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
		WREG32(R_0003F8_CRTC2_GEN_CNTL, save->CRTC2_GEN_CNTL);
	}
}
/*
 * Return the smallest order (power-of-two exponent) such that
 * (1UL << order) >= size, i.e. ceil(log2(size)).  size == 0 and
 * size == 1 both yield 0.
 */
int drm_order(unsigned long size)
{
	int order = 0;
	unsigned long v = size;

	/* floor(log2(size)) by repeated halving */
	while (v > 1) {
		v >>= 1;
		order++;
	}
	/* bump to the next power of two unless size already is one */
	if (size & (size - 1))
		order++;
	return order;
}
/drivers/video/drm/radeon/r300.c |
---|
25,17 → 25,12 |
* Alex Deucher |
* Jerome Glisse |
*/ |
#include <linux/seq_file.h> |
//#include <linux/seq_file.h> |
#include "drmP.h" |
#include "drm.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "radeon_drm.h" |
#include "r300d.h" |
#include "r300_reg_safe.h" |
/* r300,r350,rv350,rv370,rv380 depends on : */ |
void r100_hdp_reset(struct radeon_device *rdev); |
int r100_cp_reset(struct radeon_device *rdev); |
42,6 → 37,7 |
int r100_rb2d_reset(struct radeon_device *rdev); |
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); |
int r100_pci_gart_enable(struct radeon_device *rdev); |
void r100_pci_gart_disable(struct radeon_device *rdev); |
void r100_mc_setup(struct radeon_device *rdev); |
void r100_mc_disable_clients(struct radeon_device *rdev); |
int r100_gui_wait_for_idle(struct radeon_device *rdev); |
48,11 → 44,14 |
int r100_cs_packet_parse(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt, |
unsigned idx); |
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p); |
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, |
struct radeon_cs_reloc **cs_reloc); |
int r100_cs_parse_packet0(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt, |
const unsigned *auth, unsigned n, |
radeon_packet0_check_t check); |
void r100_cs_dump_packet(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt); |
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt, |
struct radeon_object *robj); |
81,61 → 80,30 |
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB); |
(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
} |
mb(); |
} |
/*
 * Write one PCIE GART page-table entry.
 *
 * Entry layout: bits 8..31 hold address bits 8..31 (low dword shifted
 * right by 8 with the top 8 bits of the 40-bit address placed at bits
 * 24..31), bits 0..3 are the valid/system flags (0xc).
 *
 * Fix: valid page indices are 0 .. num_gpu_pages - 1; the old check
 * used `i > num_gpu_pages`, letting i == num_gpu_pages write one entry
 * past the end of the table (table_size is num_gpu_pages * 4 bytes).
 *
 * Returns 0 on success, -EINVAL for an out-of-range index.
 */
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* on x86 we want this to be CPU endian, on powerpc
	 * on powerpc without HW swappers, it'll get swapped on way
	 * into VRAM - so no need for cpu_to_le32 on VRAM tables */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}
/*
 * NOTE(review): the region below is not valid C.  It appears to be a
 * corrupted side-by-side diff of rv370_pcie_gart_init() against
 * rv370_pcie_gart_enable(): two signatures back to back, duplicated
 * `if (r)` lines from both diff sides, a stray "157,7 -> 125,7" hunk
 * marker, and unbalanced braces.  Left byte-for-byte as found; restore
 * this span from the pristine r300.c before attempting to build.
 */
int rv370_pcie_gart_init(struct radeon_device *rdev)
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;
	if (rdev->gart.table.vram.robj) {
		WARN(1, "RV370 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
	if (r) {
		return r;
	}
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r)
	if (r) {
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
	rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
	return radeon_gart_table_vram_alloc(rdev);
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;
	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* discard memory request outside of configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	157,7 → 125,7
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->mc.gtt_size >> 20, table_addr);
	rdev->gart.ready = true;
	return 0;
}
175,13 → 143,40 |
} |
} |
void rv370_pcie_gart_fini(struct radeon_device *rdev) |
/*
 * Write one PCIE GART page-table entry (little-endian table in VRAM).
 *
 * Fixes:
 *  - The high 8 bits of the 40-bit address were shifted to bit 4
 *    (`<< 4`), colliding with the low-address field; the entry format
 *    (see the sibling implementation in this driver) places them at
 *    bits 24..31 (`<< 24`).
 *  - Valid page indices are 0 .. num_gpu_pages - 1; `i > num_gpu_pages`
 *    allowed a write one entry past the table end.
 *
 * Returns 0 on success, -EINVAL for an out-of-range index.
 */
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = (((u32)addr) >> 8) | ((upper_32_bits(addr) & 0xff) << 24) | 0xC;
	writel(cpu_to_le32(addr), ((void __iomem *)ptr) + (i * 4));
	return 0;
}
/*
 * Select and enable the GART backend for r300-class chips:
 * AGP parts skip GART entirely, PCIE parts hook up the rv370 PCIE GART
 * ops and enable it, everything else falls back to the PCI GART.
 *
 * NOTE(review): the radeon_gart_table_vram_free()/radeon_gart_fini()
 * calls inside the AGP branch are teardown logic that looks like the
 * body of rv370_pcie_gart_fini() spliced in by the diff rendering (its
 * orphan signature appears just above this function) — confirm against
 * the pristine source.
 */
int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->family > CHIP_RV350) {
			rv370_pcie_gart_disable(rdev);
			radeon_gart_table_vram_free(rdev);
			radeon_gart_fini(rdev);
		} else {
			r100_pci_gart_disable(rdev);
		}
		return 0;
	}
#endif
	if (rdev->flags & RADEON_IS_PCIE) {
		/* install the PCIE GART ops before enabling */
		rdev->asic->gart_disable = &rv370_pcie_gart_disable;
		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		return rv370_pcie_gart_enable(rdev);
	}
	return r100_pci_gart_enable(rdev);
}
/* |
* MC |
*/ |
202,6 → 197,16 |
/* Setup GPU memory space */ |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
rdev->mc.gtt_location = 0xFFFFFFFFUL; |
if (rdev->flags & RADEON_IS_AGP) { |
r = radeon_agp_init(rdev); |
if (r) { |
printk(KERN_WARNING "[drm] Disabling AGP\n"); |
rdev->flags &= ~RADEON_IS_AGP; |
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
} else { |
rdev->mc.gtt_location = rdev->mc.agp_base; |
} |
} |
r = radeon_mc_setup(rdev); |
if (r) { |
return r; |
219,7 → 224,15 |
/*
 * Tear down the GART for r300-class chips: disable the active backend
 * (PCIE vs PCI), free its page table (VRAM- or RAM-backed to match),
 * then release the common gart structure.
 */
void r300_mc_fini(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
		radeon_gart_table_vram_free(rdev);
	} else {
		r100_pci_gart_disable(rdev);
		radeon_gart_table_ram_free(rdev);
	}
	radeon_gart_fini(rdev);
}
/* |
431,7 → 444,6 |
/* rv350,rv370,rv380 */ |
rdev->num_gb_pipes = 1; |
} |
rdev->num_z_pipes = 1; |
gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); |
switch (rdev->num_gb_pipes) { |
case 2: |
470,8 → 482,7 |
printk(KERN_WARNING "Failed to wait MC idle while " |
"programming pipes. Bad things might happen.\n"); |
} |
DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n", |
rdev->num_gb_pipes, rdev->num_z_pipes); |
DRM_INFO("radeon: %d pipes initialized.\n", rdev->num_gb_pipes); |
} |
int r300_ga_reset(struct radeon_device *rdev) |
572,12 → 583,35 |
} else { |
rdev->mc.vram_width = 64; |
} |
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
r100_vram_init_sizes(rdev); |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
} |
/* |
* Indirect registers accessor |
*/ |
/*
 * Read an indirect PCIE register: latch the (byte-sized) index, read
 * it back to post the write, then fetch the data port.
 */
uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	WREG8(RADEON_PCIE_INDEX, reg & 0xff);
	(void)RREG32(RADEON_PCIE_INDEX);	/* post the index write */
	return RREG32(RADEON_PCIE_DATA);
}
/*
 * Write an indirect PCIE register: latch the index, post it with a
 * readback, write the data port, then post that write too.
 */
void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG8(RADEON_PCIE_INDEX, reg & 0xff);
	(void)RREG32(RADEON_PCIE_INDEX);	/* post the index write */
	WREG32(RADEON_PCIE_DATA, v);
	(void)RREG32(RADEON_PCIE_DATA);		/* post the data write */
}
/* |
* PCIE Lanes |
*/ |
688,6 → 722,304 |
/* |
* CS functions |
*/ |
/*
 * State of one color (or depth) render target accumulated while
 * parsing a command stream; validated by r300_cs_track_check().
 */
struct r300_cs_track_cb {
	struct radeon_object *robj;	/* bound buffer object */
	unsigned pitch;		/* in pixels; checker multiplies by cpp */
	unsigned cpp;		/* bytes per pixel */
	unsigned offset;	/* byte offset into robj */
};
/* One bound vertex array tracked during CS parsing. */
struct r300_cs_track_array {
	struct radeon_object *robj;	/* backing buffer object */
	unsigned esize;		/* element size; checker sizes as esize * count * 4 bytes */
};
/* Per-texture-unit state validated by r300_cs_track_texture_check(). */
struct r300_cs_track_texture {
	struct radeon_object *robj;	/* bound texture buffer object */
	unsigned pitch;		/* used instead of width when use_pitch is set */
	unsigned width;
	unsigned height;
	unsigned num_levels;	/* highest mip level walked (inclusive) */
	unsigned cpp;		/* bytes per texel */
	unsigned tex_coord_type;	/* 0: flat, 1: scales by 2^txdepth, 2: x6 faces */
	unsigned txdepth;
	unsigned width_11;	/* extra width bit OR'd in on RV515+ */
	unsigned height_11;	/* extra height bit OR'd in on RV515+ */
	bool use_pitch;		/* size from pitch rather than width */
	bool enabled;		/* unit participates in checking */
	bool roundup_w;		/* round mip width up to a power of two */
	bool roundup_h;		/* round mip height up to a power of two */
};
/*
 * Aggregate CS-checker state for one r300 command stream; filled while
 * parsing and validated by r300_cs_track_check() /
 * r300_cs_track_texture_check().
 */
struct r300_cs_track {
	unsigned num_cb;	/* active color buffers (up to 4) */
	unsigned maxy;		/* render-target height used for sizing */
	unsigned vtx_size;
	unsigned vap_vf_cntl;	/* bits 4-5: prim_walk, bits 16-31: vertex count */
	unsigned immd_dwords;
	unsigned num_arrays;	/* valid entries in arrays[] */
	unsigned max_indx;	/* highest index referenced (prim_walk == 1) */
	struct r300_cs_track_array arrays[11];
	struct r300_cs_track_cb cb[4];
	struct r300_cs_track_cb zb;	/* depth buffer, checked when z_enabled */
	struct r300_cs_track_texture textures[16];
	bool z_enabled;
};
/*
 * Dump one texture unit's tracked state via DRM_ERROR — used by the CS
 * checker to explain a rejected texture setup.
 */
static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t)
{
	DRM_ERROR("pitch                      %d\n", t->pitch);
	DRM_ERROR("width                      %d\n", t->width);
	DRM_ERROR("height                     %d\n", t->height);
	DRM_ERROR("num levels                 %d\n", t->num_levels);
	DRM_ERROR("depth                      %d\n", t->txdepth);
	DRM_ERROR("bpp                        %d\n", t->cpp);
	DRM_ERROR("coordinate type            %d\n", t->tex_coord_type);
	DRM_ERROR("width round to power of 2  %d\n", t->roundup_w);
	DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
}
/*
 * Validate every enabled texture unit in @track: walk the mip chain
 * accumulating the footprint in texels, scale by bytes-per-texel and
 * the coordinate-type factor, and reject any unit whose bound object
 * is too small.  Returns 0 on success, -EINVAL on any violation.
 */
static inline int r300_cs_track_texture_check(struct radeon_device *rdev,
					      struct r300_cs_track *track)
{
	struct radeon_object *robj;
	unsigned long size;
	unsigned u, i, w, h;

	for (u = 0; u < 16; u++) {
		if (!track->textures[u].enabled)
			continue;
		robj = track->textures[u].robj;
		if (robj == NULL) {
			DRM_ERROR("No texture bound to unit %u\n", u);
			return -EINVAL;
		}
		size = 0;
		for (i = 0; i <= track->textures[u].num_levels; i++) {
			/* mip width: explicit pitch, or width halved per level */
			if (track->textures[u].use_pitch) {
				w = track->textures[u].pitch / (1 << i);
			} else {
				w = track->textures[u].width / (1 << i);
				if (rdev->family >= CHIP_RV515)
					w |= track->textures[u].width_11;
				if (track->textures[u].roundup_w)
					w = roundup_pow_of_two(w);
			}
			h = track->textures[u].height / (1 << i);
			if (rdev->family >= CHIP_RV515)
				h |= track->textures[u].height_11;
			if (track->textures[u].roundup_h)
				h = roundup_pow_of_two(h);
			size += w * h;
		}
		size *= track->textures[u].cpp;
		switch (track->textures[u].tex_coord_type) {
		case 0:
			break;
		case 1:
			/* NOTE(review): scales by 2^txdepth — presumably a
			 * 3D texture's slice count; confirm */
			size *= (1 << track->textures[u].txdepth);
			break;
		case 2:
			/* six faces — presumably a cube map; confirm */
			size *= 6;
			break;
		default:
			DRM_ERROR("Invalid texture coordinate type %u for unit "
				  "%u\n", track->textures[u].tex_coord_type, u);
			return -EINVAL;
		}
		if (size > radeon_object_size(robj)) {
			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
				  "%lu\n", u, size, radeon_object_size(robj));
			r300_cs_track_texture_print(&track->textures[u]);
			return -EINVAL;
		}
	}
	return 0;
}
int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track) |
{ |
unsigned i; |
unsigned long size; |
unsigned prim_walk; |
unsigned nverts; |
for (i = 0; i < track->num_cb; i++) { |
if (track->cb[i].robj == NULL) { |
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); |
return -EINVAL; |
} |
size = track->cb[i].pitch * track->cb[i].cpp * track->maxy; |
size += track->cb[i].offset; |
if (size > radeon_object_size(track->cb[i].robj)) { |
DRM_ERROR("[drm] Buffer too small for color buffer %d " |
"(need %lu have %lu) !\n", i, size, |
radeon_object_size(track->cb[i].robj)); |
DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n", |
i, track->cb[i].pitch, track->cb[i].cpp, |
track->cb[i].offset, track->maxy); |
return -EINVAL; |
} |
} |
if (track->z_enabled) { |
if (track->zb.robj == NULL) { |
DRM_ERROR("[drm] No buffer for z buffer !\n"); |
return -EINVAL; |
} |
size = track->zb.pitch * track->zb.cpp * track->maxy; |
size += track->zb.offset; |
if (size > radeon_object_size(track->zb.robj)) { |
DRM_ERROR("[drm] Buffer too small for z buffer " |
"(need %lu have %lu) !\n", size, |
radeon_object_size(track->zb.robj)); |
return -EINVAL; |
} |
} |
prim_walk = (track->vap_vf_cntl >> 4) & 0x3; |
nverts = (track->vap_vf_cntl >> 16) & 0xFFFF; |
switch (prim_walk) { |
case 1: |
for (i = 0; i < track->num_arrays; i++) { |
size = track->arrays[i].esize * track->max_indx * 4; |
if (track->arrays[i].robj == NULL) { |
DRM_ERROR("(PW %u) Vertex array %u no buffer " |
"bound\n", prim_walk, i); |
return -EINVAL; |
} |
if (size > radeon_object_size(track->arrays[i].robj)) { |
DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " |
"have %lu dwords\n", prim_walk, i, |
size >> 2, |
radeon_object_size(track->arrays[i].robj) >> 2); |
DRM_ERROR("Max indices %u\n", track->max_indx); |
return -EINVAL; |
} |
} |
break; |
case 2: |
for (i = 0; i < track->num_arrays; i++) { |
size = track->arrays[i].esize * (nverts - 1) * 4; |
if (track->arrays[i].robj == NULL) { |
DRM_ERROR("(PW %u) Vertex array %u no buffer " |
"bound\n", prim_walk, i); |
return -EINVAL; |
} |
if (size > radeon_object_size(track->arrays[i].robj)) { |
DRM_ERROR("(PW %u) Vertex array %u need %lu dwords " |
"have %lu dwords\n", prim_walk, i, size >> 2, |
radeon_object_size(track->arrays[i].robj) >> 2); |
return -EINVAL; |
} |
} |
break; |
case 3: |
size = track->vtx_size * nverts; |
if (size != track->immd_dwords) { |
DRM_ERROR("IMMD draw %u dwors but needs %lu dwords\n", |
track->immd_dwords, size); |
DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n", |
nverts, track->vtx_size); |
return -EINVAL; |
} |
break; |
default: |
DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n", |
prim_walk); |
return -EINVAL; |
} |
return r300_cs_track_texture_check(rdev, track); |
} |
static inline void r300_cs_track_clear(struct r300_cs_track *track) |
{ |
unsigned i; |
track->num_cb = 4; |
track->maxy = 4096; |
for (i = 0; i < track->num_cb; i++) { |
track->cb[i].robj = NULL; |
track->cb[i].pitch = 8192; |
track->cb[i].cpp = 16; |
track->cb[i].offset = 0; |
} |
track->z_enabled = true; |
track->zb.robj = NULL; |
track->zb.pitch = 8192; |
track->zb.cpp = 4; |
track->zb.offset = 0; |
track->vtx_size = 0x7F; |
track->immd_dwords = 0xFFFFFFFFUL; |
track->num_arrays = 11; |
track->max_indx = 0x00FFFFFFUL; |
for (i = 0; i < track->num_arrays; i++) { |
track->arrays[i].robj = NULL; |
track->arrays[i].esize = 0x7F; |
} |
for (i = 0; i < 16; i++) { |
track->textures[i].pitch = 16536; |
track->textures[i].width = 16536; |
track->textures[i].height = 16536; |
track->textures[i].width_11 = 1 << 11; |
track->textures[i].height_11 = 1 << 11; |
track->textures[i].num_levels = 12; |
track->textures[i].txdepth = 16; |
track->textures[i].cpp = 64; |
track->textures[i].tex_coord_type = 1; |
track->textures[i].robj = NULL; |
/* CS IB emission code makes sure texture unit are disabled */ |
track->textures[i].enabled = false; |
track->textures[i].roundup_w = true; |
track->textures[i].roundup_h = true; |
} |
} |
#endif |
/* Bitmap of registers userspace may write directly through the CS:
 * one bit per register dword, a set bit marks the register as safe to
 * pass through unchecked.  Cleared bits are registers that need special
 * handling by the packet checker (relocations, pitches, GART addresses,
 * forbidden registers).  Installed into rdev->config.r300.reg_safe_bm.
 * NOTE(review): assumed indexed by (register offset >> 2) / 32 as in the
 * upstream radeon CS checker — confirm against the packet0 handler. */
static const unsigned r300_reg_safe_bm[159] = {
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
	0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
	0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
	0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
	0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
	0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
	0x00000000, 0x0000C100, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x0003FC01, 0xFFFFFFF8, 0xFE800B19,
};
#if 0 |
static int r300_packet0_check(struct radeon_cs_parser *p, |
struct radeon_cs_packet *pkt, |
unsigned idx, unsigned reg) |
694,19 → 1026,19 |
{ |
struct radeon_cs_chunk *ib_chunk; |
struct radeon_cs_reloc *reloc; |
struct r100_cs_track *track; |
struct r300_cs_track *track; |
volatile uint32_t *ib; |
uint32_t tmp, tile_flags = 0; |
uint32_t tmp; |
unsigned i; |
int r; |
ib = p->ib->ptr; |
ib_chunk = &p->chunks[p->chunk_ib_idx]; |
track = (struct r100_cs_track *)p->track; |
track = (struct r300_cs_track*)p->track; |
switch(reg) { |
case AVIVO_D1MODE_VLINE_START_END: |
case RADEON_CRTC_GUI_TRIG_VLINE: |
r = r100_cs_packet_parse_vline(p); |
case RADEON_DST_PITCH_OFFSET: |
case RADEON_SRC_PITCH_OFFSET: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
713,13 → 1045,10 |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
tmp = ib_chunk->kdata[idx] & 0x003fffff; |
tmp += (((u32)reloc->lobj.gpu_offset) >> 10); |
ib[idx] = (ib_chunk->kdata[idx] & 0xffc00000) | tmp; |
break; |
case RADEON_DST_PITCH_OFFSET: |
case RADEON_SRC_PITCH_OFFSET: |
r = r100_reloc_pitch_offset(p, pkt, idx, reg); |
if (r) |
return r; |
break; |
case R300_RB3D_COLOROFFSET0: |
case R300_RB3D_COLOROFFSET1: |
case R300_RB3D_COLOROFFSET2: |
807,23 → 1136,6 |
/* RB3D_COLORPITCH1 */ |
/* RB3D_COLORPITCH2 */ |
/* RB3D_COLORPITCH3 */ |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
tile_flags |= R300_COLOR_TILE_ENABLE; |
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); |
tmp |= tile_flags; |
ib[idx] = tmp; |
i = (reg - 0x4E38) >> 2; |
track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; |
switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { |
879,23 → 1191,6 |
break; |
case 0x4F24: |
/* ZB_DEPTHPITCH */ |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) |
tile_flags |= R300_DEPTHMACROTILE_ENABLE; |
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) |
tile_flags |= R300_DEPTHMICROTILE_TILED;; |
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); |
tmp |= tile_flags; |
ib[idx] = tmp; |
track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; |
break; |
case 0x4104: |
927,41 → 1222,42 |
tmp = (ib_chunk->kdata[idx] >> 25) & 0x3; |
track->textures[i].tex_coord_type = tmp; |
switch ((ib_chunk->kdata[idx] & 0x1F)) { |
case R300_TX_FORMAT_X8: |
case R300_TX_FORMAT_Y4X4: |
case R300_TX_FORMAT_Z3Y3X2: |
case 0: |
case 2: |
case 5: |
case 18: |
case 20: |
case 21: |
track->textures[i].cpp = 1; |
break; |
case R300_TX_FORMAT_X16: |
case R300_TX_FORMAT_Y8X8: |
case R300_TX_FORMAT_Z5Y6X5: |
case R300_TX_FORMAT_Z6Y5X5: |
case R300_TX_FORMAT_W4Z4Y4X4: |
case R300_TX_FORMAT_W1Z5Y5X5: |
case R300_TX_FORMAT_DXT1: |
case R300_TX_FORMAT_D3DMFT_CxV8U8: |
case R300_TX_FORMAT_B8G8_B8G8: |
case R300_TX_FORMAT_G8R8_G8B8: |
case 1: |
case 3: |
case 6: |
case 7: |
case 10: |
case 11: |
case 19: |
case 22: |
case 24: |
track->textures[i].cpp = 2; |
break; |
case R300_TX_FORMAT_Y16X16: |
case R300_TX_FORMAT_Z11Y11X10: |
case R300_TX_FORMAT_Z10Y11X11: |
case R300_TX_FORMAT_W8Z8Y8X8: |
case R300_TX_FORMAT_W2Z10Y10X10: |
case 0x17: |
case R300_TX_FORMAT_FL_I32: |
case 0x1e: |
case R300_TX_FORMAT_DXT3: |
case R300_TX_FORMAT_DXT5: |
case 4: |
case 8: |
case 9: |
case 12: |
case 13: |
case 23: |
case 25: |
case 27: |
case 30: |
track->textures[i].cpp = 4; |
break; |
case R300_TX_FORMAT_W16Z16Y16X16: |
case R300_TX_FORMAT_FL_R16G16B16A16: |
case R300_TX_FORMAT_FL_I32A32: |
case 14: |
case 26: |
case 28: |
track->textures[i].cpp = 8; |
break; |
case R300_TX_FORMAT_FL_R32G32B32A32: |
case 29: |
track->textures[i].cpp = 16; |
break; |
default: |
989,11 → 1285,11 |
case 0x443C: |
/* TX_FILTER0_[0-15] */ |
i = (reg - 0x4400) >> 2; |
tmp = ib_chunk->kdata[idx] & 0x7; |
tmp = ib_chunk->kdata[idx] & 0x7;; |
if (tmp == 2 || tmp == 4 || tmp == 6) { |
track->textures[i].roundup_w = false; |
} |
tmp = (ib_chunk->kdata[idx] >> 3) & 0x7; |
tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;; |
if (tmp == 2 || tmp == 4 || tmp == 6) { |
track->textures[i].roundup_h = false; |
} |
1054,21 → 1350,6 |
tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; |
track->textures[i].txdepth = tmp; |
break; |
case R300_ZB_ZPASS_ADDR: |
r = r100_cs_packet_next_reloc(p, &reloc); |
if (r) { |
DRM_ERROR("No reloc for ib[%d]=0x%04X\n", |
idx, reg); |
r100_cs_dump_packet(p, pkt); |
return r; |
} |
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); |
break; |
case 0x4be8: |
/* valid register only on RV530 */ |
if (p->rdev->family == CHIP_RV530) |
break; |
/* fallthrough do not move */ |
default: |
printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", |
reg, idx); |
1081,9 → 1362,8 |
struct radeon_cs_packet *pkt) |
{ |
struct radeon_cs_chunk *ib_chunk; |
struct radeon_cs_reloc *reloc; |
struct r100_cs_track *track; |
struct r300_cs_track *track; |
volatile uint32_t *ib; |
unsigned idx; |
unsigned i, c; |
1092,7 → 1372,7 |
ib = p->ib->ptr; |
ib_chunk = &p->chunks[p->chunk_ib_idx]; |
idx = pkt->idx + 1; |
track = (struct r100_cs_track *)p->track; |
track = (struct r300_cs_track*)p->track; |
switch(pkt->opcode) { |
case PACKET3_3D_LOAD_VBPNTR: |
c = ib_chunk->kdata[idx++] & 0x1F; |
1159,7 → 1439,7 |
} |
track->vap_vf_cntl = ib_chunk->kdata[idx+1]; |
track->immd_dwords = pkt->count - 1; |
r = r100_cs_track_check(p->rdev, track); |
r = r300_cs_track_check(p->rdev, track); |
if (r) { |
return r; |
} |
1174,7 → 1454,7 |
} |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
track->immd_dwords = pkt->count; |
r = r100_cs_track_check(p->rdev, track); |
r = r300_cs_track_check(p->rdev, track); |
if (r) { |
return r; |
} |
1181,7 → 1461,7 |
break; |
case PACKET3_3D_DRAW_VBUF: |
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; |
r = r100_cs_track_check(p->rdev, track); |
r = r300_cs_track_check(p->rdev, track); |
if (r) { |
return r; |
} |
1188,7 → 1468,7 |
break; |
case PACKET3_3D_DRAW_VBUF_2: |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
r = r100_cs_track_check(p->rdev, track); |
r = r300_cs_track_check(p->rdev, track); |
if (r) { |
return r; |
} |
1195,7 → 1475,7 |
break; |
case PACKET3_3D_DRAW_INDX: |
track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; |
r = r100_cs_track_check(p->rdev, track); |
r = r300_cs_track_check(p->rdev, track); |
if (r) { |
return r; |
} |
1202,7 → 1482,7 |
break; |
case PACKET3_3D_DRAW_INDX_2: |
track->vap_vf_cntl = ib_chunk->kdata[idx]; |
r = r100_cs_track_check(p->rdev, track); |
r = r300_cs_track_check(p->rdev, track); |
if (r) { |
return r; |
} |
1219,12 → 1499,11 |
int r300_cs_parse(struct radeon_cs_parser *p) |
{ |
struct radeon_cs_packet pkt; |
struct r100_cs_track *track; |
struct r300_cs_track track; |
int r; |
track = kzalloc(sizeof(*track), GFP_KERNEL); |
r100_cs_track_clear(p->rdev, track); |
p->track = track; |
r300_cs_track_clear(&track); |
p->track = &track; |
do { |
r = r100_cs_packet_parse(p, &pkt, p->idx); |
if (r) { |
1253,51 → 1532,14 |
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
return 0; |
} |
#endif |
void r300_set_reg_safe(struct radeon_device *rdev) |
int r300_init(struct radeon_device *rdev) |
{ |
rdev->config.r300.reg_safe_bm = r300_reg_safe_bm; |
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); |
} |
int r300_init(struct radeon_device *rdev) |
{ |
r300_set_reg_safe(rdev); |
return 0; |
} |
/*
 * Program the memory controller's FB and AGP apertures.
 *
 * The sequence is order-sensitive: MC clients are stopped first, the
 * apertures are (re)programmed, the MC is waited idle, and only then are
 * the clients resumed with the saved state — do not reorder.
 */
void r300_mc_program(struct radeon_device *rdev)
{
	struct r100_mc_save save;
	int r;

	/* Debugfs registration failure is non-fatal: log and continue. */
	r = r100_debugfs_mc_info_init(rdev);
	if (r) {
		dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
	}
	/* Stops all mc clients */
	r100_mc_stop(rdev, &save);
	if (rdev->flags & RADEON_IS_AGP) {
		/* Point the AGP aperture at the GTT range. */
		WREG32(R_00014C_MC_AGP_LOCATION,
			S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
			S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
		WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
		WREG32(R_00015C_AGP_BASE_2,
			upper_32_bits(rdev->mc.agp_base) & 0xff);
	} else {
		/* No AGP: park the aperture at an invalid location. */
		WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
		WREG32(R_000170_AGP_BASE, 0);
		WREG32(R_00015C_AGP_BASE_2, 0);
	}
	/* Wait for mc idle */
	if (r300_mc_wait_for_idle(rdev))
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program MC, should be a 32bits limited address space */
	WREG32(R_000148_MC_FB_LOCATION,
		S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
		S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
	r100_mc_resume(rdev, &save);
}
/drivers/video/drm/radeon/r520.c |
---|
31,6 → 31,8 |
/* r520,rv530,rv560,rv570,r580 depends on : */ |
void r100_hdp_reset(struct radeon_device *rdev); |
int rv370_pcie_gart_enable(struct radeon_device *rdev); |
void rv370_pcie_gart_disable(struct radeon_device *rdev); |
void r420_pipes_init(struct radeon_device *rdev); |
void rs600_mc_disable_clients(struct radeon_device *rdev); |
void rs600_disable_vga(struct radeon_device *rdev); |
45,7 → 47,6 |
void r520_gpu_init(struct radeon_device *rdev); |
int r520_mc_wait_for_idle(struct radeon_device *rdev); |
/* |
* MC |
*/ |
54,7 → 55,7 |
uint32_t tmp; |
int r; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
if (r100_debugfs_rbbm_init(rdev)) { |
DRM_ERROR("Failed to register debugfs file for RBBM !\n"); |
72,6 → 73,16 |
/* Setup GPU memory space */ |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
rdev->mc.gtt_location = 0xFFFFFFFFUL; |
if (rdev->flags & RADEON_IS_AGP) { |
r = radeon_agp_init(rdev); |
if (r) { |
printk(KERN_WARNING "[drm] Disabling AGP\n"); |
rdev->flags &= ~RADEON_IS_AGP; |
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
} else { |
rdev->mc.gtt_location = rdev->mc.agp_base; |
} |
} |
r = radeon_mc_setup(rdev); |
if (r) { |
return r; |
84,8 → 95,8 |
"programming pipes. Bad things might happen.\n"); |
} |
/* Write VRAM size in case we are limiting it */ |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; |
tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16); |
tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32_MC(R520_MC_FB_LOCATION, tmp); |
104,7 → 115,7 |
WREG32_MC(R520_MC_AGP_BASE_2, 0); |
} |
LEAVE(); |
dbgprintf("done: %s\n",__FUNCTION__); |
return 0; |
} |
111,6 → 122,9 |
void r520_mc_fini(struct radeon_device *rdev) |
{ |
rv370_pcie_gart_disable(rdev); |
radeon_gart_table_vram_free(rdev); |
radeon_gart_fini(rdev); |
} |
141,7 → 155,7 |
void r520_gpu_init(struct radeon_device *rdev) |
{ |
unsigned pipe_select_current, gb_pipe_select, tmp; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
r100_hdp_reset(rdev); |
rs600_disable_vga(rdev); |
167,6 → 181,7 |
*/ |
/* workaround for RV530 */ |
if (rdev->family == CHIP_RV530) { |
WREG32(0x4124, 1); |
WREG32(0x4128, 0xFF); |
} |
r420_pipes_init(rdev); |
189,7 → 204,7 |
static void r520_vram_get_type(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
rdev->mc.vram_width = 128; |
rdev->mc.vram_is_ddr = true; |
217,20 → 232,164 |
void r520_vram_info(struct radeon_device *rdev) |
{ |
fixed20_12 a; |
r520_vram_get_type(rdev); |
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
r100_vram_init_sizes(rdev); |
/* FIXME: we should enforce default clock in case GPU is not in |
* default setup |
*/ |
a.full = rfixed_const(100); |
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); |
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
} |
void r520_bandwidth_update(struct radeon_device *rdev) |
int radeon_agp_init(struct radeon_device *rdev) |
{ |
rv515_bandwidth_avivo_update(rdev); |
dbgprintf("%s\n",__FUNCTION__); |
#if __OS_HAS_AGP |
struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list; |
struct drm_agp_mode mode; |
struct drm_agp_info info; |
uint32_t agp_status; |
int default_mode; |
bool is_v3; |
int ret; |
/* Acquire AGP. */ |
if (!rdev->ddev->agp->acquired) { |
ret = drm_agp_acquire(rdev->ddev); |
if (ret) { |
DRM_ERROR("Unable to acquire AGP: %d\n", ret); |
return ret; |
} |
} |
ret = drm_agp_info(rdev->ddev, &info); |
if (ret) { |
DRM_ERROR("Unable to get AGP info: %d\n", ret); |
return ret; |
} |
mode.mode = info.mode; |
agp_status = (RREG32(RADEON_AGP_STATUS) | RADEON_AGPv3_MODE) & mode.mode; |
is_v3 = !!(agp_status & RADEON_AGPv3_MODE); |
if (is_v3) { |
default_mode = (agp_status & RADEON_AGPv3_8X_MODE) ? 8 : 4; |
} else { |
if (agp_status & RADEON_AGP_4X_MODE) { |
default_mode = 4; |
} else if (agp_status & RADEON_AGP_2X_MODE) { |
default_mode = 2; |
} else { |
default_mode = 1; |
} |
} |
/* Apply AGPMode Quirks */ |
while (p && p->chip_device != 0) { |
if (info.id_vendor == p->hostbridge_vendor && |
info.id_device == p->hostbridge_device && |
rdev->pdev->vendor == p->chip_vendor && |
rdev->pdev->device == p->chip_device && |
rdev->pdev->subsystem_vendor == p->subsys_vendor && |
rdev->pdev->subsystem_device == p->subsys_device) { |
default_mode = p->default_mode; |
} |
++p; |
} |
if (radeon_agpmode > 0) { |
if ((radeon_agpmode < (is_v3 ? 4 : 1)) || |
(radeon_agpmode > (is_v3 ? 8 : 4)) || |
(radeon_agpmode & (radeon_agpmode - 1))) { |
DRM_ERROR("Illegal AGP Mode: %d (valid %s), leaving at %d\n", |
radeon_agpmode, is_v3 ? "4, 8" : "1, 2, 4", |
default_mode); |
radeon_agpmode = default_mode; |
} else { |
DRM_INFO("AGP mode requested: %d\n", radeon_agpmode); |
} |
} else { |
radeon_agpmode = default_mode; |
} |
mode.mode &= ~RADEON_AGP_MODE_MASK; |
if (is_v3) { |
switch (radeon_agpmode) { |
case 8: |
mode.mode |= RADEON_AGPv3_8X_MODE; |
break; |
case 4: |
default: |
mode.mode |= RADEON_AGPv3_4X_MODE; |
break; |
} |
} else { |
switch (radeon_agpmode) { |
case 4: |
mode.mode |= RADEON_AGP_4X_MODE; |
break; |
case 2: |
mode.mode |= RADEON_AGP_2X_MODE; |
break; |
case 1: |
default: |
mode.mode |= RADEON_AGP_1X_MODE; |
break; |
} |
} |
mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */ |
ret = drm_agp_enable(rdev->ddev, mode); |
if (ret) { |
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode); |
return ret; |
} |
rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base; |
rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20; |
/* workaround some hw issues */ |
if (rdev->family < CHIP_R200) { |
WREG32(RADEON_AGP_CNTL, RREG32(RADEON_AGP_CNTL) | 0x000e0000); |
} |
return 0; |
#else |
return 0; |
#endif |
} |
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
int radeon_fence_driver_init(struct radeon_device *rdev) |
{ |
unsigned long irq_flags; |
int r; |
// write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); |
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg); |
if (r) { |
DRM_ERROR("Fence failed to get a scratch register."); |
// write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
return r; |
} |
WREG32(rdev->fence_drv.scratch_reg, 0); |
// atomic_set(&rdev->fence_drv.seq, 0); |
// INIT_LIST_HEAD(&rdev->fence_drv.created); |
// INIT_LIST_HEAD(&rdev->fence_drv.emited); |
// INIT_LIST_HEAD(&rdev->fence_drv.signaled); |
rdev->fence_drv.count_timeout = 0; |
// init_waitqueue_head(&rdev->fence_drv.queue); |
// write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
// if (radeon_debugfs_fence_init(rdev)) { |
// DRM_ERROR("Failed to register debugfs file for fence !\n"); |
// } |
return 0; |
} |
/drivers/video/drm/radeon/radeon_bios.c |
---|
35,7 → 35,7 |
*/ |
static bool radeon_read_bios(struct radeon_device *rdev) |
{ |
uint8_t __iomem *bios; |
uint8_t *bios; |
size_t size; |
rdev->bios = NULL; |
48,7 → 48,7 |
// pci_unmap_rom(rdev->pdev, bios); |
return false; |
} |
rdev->bios = kmalloc(size, GFP_KERNEL); |
rdev->bios = malloc(size); |
if (rdev->bios == NULL) { |
// pci_unmap_rom(rdev->pdev, bios); |
return false; |
58,6 → 58,7 |
return true; |
} |
static bool r700_read_disabled_bios(struct radeon_device *rdev) |
{ |
uint32_t viph_control; |
351,11 → 352,14 |
return legacy_read_disabled_bios(rdev); |
} |
bool radeon_get_bios(struct radeon_device *rdev) |
{ |
bool r; |
uint16_t tmp; |
dbgprintf("%s\n\r",__FUNCTION__); |
r = radeon_read_bios(rdev); |
if (r == false) { |
r = radeon_read_disabled_bios(rdev); |
381,7 → 385,7 |
rdev->is_atom_bios = false; |
} |
DRM_DEBUG("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM"); |
dbgprintf("%sBIOS detected\n", rdev->is_atom_bios ? "ATOM" : "COM"); |
return true; |
free_bios: |
kfree(rdev->bios); |
/drivers/video/drm/radeon/radeon_connectors.c |
---|
28,7 → 28,6 |
#include "drm_crtc_helper.h" |
#include "radeon_drm.h" |
#include "radeon.h" |
#include "atom.h" |
extern void |
radeon_combios_connected_scratch_regs(struct drm_connector *connector, |
39,15 → 38,6 |
struct drm_encoder *encoder, |
bool connected); |
static void radeon_property_change_mode(struct drm_encoder *encoder) |
{ |
struct drm_crtc *crtc = encoder->crtc; |
if (crtc && crtc->enabled) { |
drm_crtc_helper_set_mode(crtc, &crtc->mode, |
crtc->x, crtc->y, crtc->fb); |
} |
} |
static void |
radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_connector_status status) |
{ |
87,27 → 77,6 |
} |
} |
struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) |
{ |
struct drm_mode_object *obj; |
struct drm_encoder *encoder; |
int i; |
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
if (connector->encoder_ids[i] == 0) |
break; |
obj = drm_mode_object_find(connector->dev, connector->encoder_ids[i], DRM_MODE_OBJECT_ENCODER); |
if (!obj) |
continue; |
encoder = obj_to_encoder(obj); |
if (encoder->encoder_type == encoder_type) |
return encoder; |
} |
return NULL; |
} |
struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) |
{ |
int enc_id = connector->encoder_ids[0]; |
114,6 → 83,8 |
struct drm_mode_object *obj; |
struct drm_encoder *encoder; |
ENTRY(); |
/* pick the encoder ids */ |
if (enc_id) { |
obj = drm_mode_object_find(connector->dev, enc_id, DRM_MODE_OBJECT_ENCODER); |
125,53 → 96,6 |
return NULL; |
} |
/* |
* radeon_connector_analog_encoder_conflict_solve |
* - search for other connectors sharing this encoder |
* if priority is true, then set them disconnected if this is connected |
* if priority is false, set us disconnected if they are connected |
*/ |
static enum drm_connector_status |
radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, |
struct drm_encoder *encoder, |
enum drm_connector_status current_status, |
bool priority) |
{ |
struct drm_device *dev = connector->dev; |
struct drm_connector *conflict; |
int i; |
list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { |
if (conflict == connector) |
continue; |
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
if (conflict->encoder_ids[i] == 0) |
break; |
/* if the IDs match */ |
if (conflict->encoder_ids[i] == encoder->base.id) { |
if (conflict->status != connector_status_connected) |
continue; |
if (priority == true) { |
DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict)); |
DRM_INFO("in favor of %s\n", drm_get_connector_name(connector)); |
conflict->status = connector_status_disconnected; |
radeon_connector_update_scratch_regs(conflict, connector_status_disconnected); |
} else { |
DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector)); |
DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict)); |
current_status = connector_status_disconnected; |
} |
break; |
} |
} |
} |
return current_status; |
} |
static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
204,172 → 128,13 |
return mode; |
} |
static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_connector *connector) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_display_mode *mode = NULL; |
struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; |
int i; |
struct mode_size { |
int w; |
int h; |
} common_modes[17] = { |
{ 640, 480}, |
{ 720, 480}, |
{ 800, 600}, |
{ 848, 480}, |
{1024, 768}, |
{1152, 768}, |
{1280, 720}, |
{1280, 800}, |
{1280, 854}, |
{1280, 960}, |
{1280, 1024}, |
{1440, 900}, |
{1400, 1050}, |
{1680, 1050}, |
{1600, 1200}, |
{1920, 1080}, |
{1920, 1200} |
}; |
for (i = 0; i < 17; i++) { |
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
if (common_modes[i].w > native_mode->panel_xres || |
common_modes[i].h > native_mode->panel_yres || |
(common_modes[i].w == native_mode->panel_xres && |
common_modes[i].h == native_mode->panel_yres)) |
continue; |
} |
if (common_modes[i].w < 320 || common_modes[i].h < 200) |
continue; |
mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false); |
drm_mode_probed_add(connector, mode); |
} |
} |
int radeon_connector_set_property(struct drm_connector *connector, struct drm_property *property, |
uint64_t val) |
{ |
struct drm_device *dev = connector->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct drm_encoder *encoder; |
struct radeon_encoder *radeon_encoder; |
if (property == rdev->mode_info.coherent_mode_property) { |
struct radeon_encoder_atom_dig *dig; |
/* need to find digital encoder on connector */ |
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); |
if (!encoder) |
return 0; |
radeon_encoder = to_radeon_encoder(encoder); |
if (!radeon_encoder->enc_priv) |
return 0; |
dig = radeon_encoder->enc_priv; |
dig->coherent_mode = val ? true : false; |
radeon_property_change_mode(&radeon_encoder->base); |
} |
if (property == rdev->mode_info.tv_std_property) { |
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TVDAC); |
if (!encoder) { |
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_DAC); |
} |
if (!encoder) |
return 0; |
radeon_encoder = to_radeon_encoder(encoder); |
if (!radeon_encoder->enc_priv) |
return 0; |
if (rdev->is_atom_bios) { |
struct radeon_encoder_atom_dac *dac_int; |
dac_int = radeon_encoder->enc_priv; |
dac_int->tv_std = val; |
} else { |
struct radeon_encoder_tv_dac *dac_int; |
dac_int = radeon_encoder->enc_priv; |
dac_int->tv_std = val; |
} |
radeon_property_change_mode(&radeon_encoder->base); |
} |
if (property == rdev->mode_info.load_detect_property) { |
struct radeon_connector *radeon_connector = |
to_radeon_connector(connector); |
if (val == 0) |
radeon_connector->dac_load_detect = false; |
else |
radeon_connector->dac_load_detect = true; |
} |
if (property == rdev->mode_info.tmds_pll_property) { |
struct radeon_encoder_int_tmds *tmds = NULL; |
bool ret = false; |
/* need to find digital encoder on connector */ |
encoder = radeon_find_encoder(connector, DRM_MODE_ENCODER_TMDS); |
if (!encoder) |
return 0; |
radeon_encoder = to_radeon_encoder(encoder); |
tmds = radeon_encoder->enc_priv; |
if (!tmds) |
return 0; |
if (val == 0) { |
if (rdev->is_atom_bios) |
ret = radeon_atombios_get_tmds_info(radeon_encoder, tmds); |
else |
ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds); |
} |
if (val == 1 || ret == false) { |
radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds); |
} |
radeon_property_change_mode(&radeon_encoder->base); |
} |
return 0; |
} |
static void radeon_fixup_lvds_native_mode(struct drm_encoder *encoder, |
struct drm_connector *connector) |
{ |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; |
/* Try to get native mode details from EDID if necessary */ |
if (!native_mode->dotclock) { |
struct drm_display_mode *t, *mode; |
list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { |
if (mode->hdisplay == native_mode->panel_xres && |
mode->vdisplay == native_mode->panel_yres) { |
native_mode->hblank = mode->htotal - mode->hdisplay; |
native_mode->hoverplus = mode->hsync_start - mode->hdisplay; |
native_mode->hsync_width = mode->hsync_end - mode->hsync_start; |
native_mode->vblank = mode->vtotal - mode->vdisplay; |
native_mode->voverplus = mode->vsync_start - mode->vdisplay; |
native_mode->vsync_width = mode->vsync_end - mode->vsync_start; |
native_mode->dotclock = mode->clock; |
DRM_INFO("Determined LVDS native mode details from EDID\n"); |
break; |
} |
} |
} |
if (!native_mode->dotclock) { |
DRM_INFO("No LVDS native mode details, disabling RMX\n"); |
radeon_encoder->rmx_type = RMX_OFF; |
} |
} |
static int radeon_lvds_get_modes(struct drm_connector *connector) |
{ |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
380,12 → 145,6 |
if (radeon_connector->ddc_bus) { |
ret = radeon_ddc_get_modes(radeon_connector); |
if (ret > 0) { |
encoder = radeon_best_single_encoder(connector); |
if (encoder) { |
radeon_fixup_lvds_native_mode(encoder, connector); |
/* add scaled modes */ |
radeon_add_common_modes(encoder, connector); |
} |
return ret; |
} |
} |
399,10 → 158,7 |
if (mode) { |
ret = 1; |
drm_mode_probed_add(connector, mode); |
/* add scaled modes */ |
radeon_add_common_modes(encoder, connector); |
} |
return ret; |
} |
432,42 → 188,6 |
kfree(connector); |
} |
static int radeon_lvds_set_property(struct drm_connector *connector, |
struct drm_property *property, |
uint64_t value) |
{ |
struct drm_device *dev = connector->dev; |
struct radeon_encoder *radeon_encoder; |
enum radeon_rmx_type rmx_type; |
DRM_DEBUG("\n"); |
if (property != dev->mode_config.scaling_mode_property) |
return 0; |
if (connector->encoder) |
radeon_encoder = to_radeon_encoder(connector->encoder); |
else { |
struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; |
radeon_encoder = to_radeon_encoder(connector_funcs->best_encoder(connector)); |
} |
switch (value) { |
case DRM_MODE_SCALE_NONE: rmx_type = RMX_OFF; break; |
case DRM_MODE_SCALE_CENTER: rmx_type = RMX_CENTER; break; |
case DRM_MODE_SCALE_ASPECT: rmx_type = RMX_ASPECT; break; |
default: |
case DRM_MODE_SCALE_FULLSCREEN: rmx_type = RMX_FULL; break; |
} |
if (radeon_encoder->rmx_type == rmx_type) |
return 0; |
radeon_encoder->rmx_type = rmx_type; |
radeon_property_change_mode(&radeon_encoder->base); |
return 0; |
} |
struct drm_connector_helper_funcs radeon_lvds_connector_helper_funcs = { |
.get_modes = radeon_lvds_get_modes, |
.mode_valid = radeon_lvds_mode_valid, |
479,7 → 199,7 |
.detect = radeon_lvds_detect, |
.fill_modes = drm_helper_probe_single_connector_modes, |
.destroy = radeon_connector_destroy, |
.set_property = radeon_lvds_set_property, |
.set_property = radeon_connector_set_property, |
}; |
static int radeon_vga_get_modes(struct drm_connector *connector) |
495,6 → 215,7 |
static int radeon_vga_mode_valid(struct drm_connector *connector, |
struct drm_display_mode *mode) |
{ |
return MODE_OK; |
} |
506,10 → 227,6 |
bool dret; |
enum drm_connector_status ret = connector_status_disconnected; |
encoder = radeon_best_single_encoder(connector); |
if (!encoder) |
ret = connector_status_disconnected; |
radeon_i2c_do_lock(radeon_connector, 1); |
dret = radeon_ddc_probe(radeon_connector); |
radeon_i2c_do_lock(radeon_connector, 0); |
516,14 → 233,16 |
if (dret) |
ret = connector_status_connected; |
else { |
if (radeon_connector->dac_load_detect) { |
/* if EDID fails to a load detect */ |
encoder = radeon_best_single_encoder(connector); |
if (!encoder) |
ret = connector_status_disconnected; |
else { |
encoder_funcs = encoder->helper_private; |
ret = encoder_funcs->detect(encoder, connector); |
} |
} |
if (ret == connector_status_connected) |
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); |
radeon_connector_update_scratch_regs(connector, ret); |
return ret; |
} |
542,73 → 261,6 |
.set_property = radeon_connector_set_property, |
}; |
static int radeon_tv_get_modes(struct drm_connector *connector) |
{ |
struct drm_device *dev = connector->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct drm_display_mode *tv_mode; |
struct drm_encoder *encoder; |
encoder = radeon_best_single_encoder(connector); |
if (!encoder) |
return 0; |
/* avivo chips can scale any mode */ |
if (rdev->family >= CHIP_RS600) |
/* add scaled modes */ |
radeon_add_common_modes(encoder, connector); |
else { |
/* only 800x600 is supported right now on pre-avivo chips */ |
tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false); |
tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; |
drm_mode_probed_add(connector, tv_mode); |
} |
return 1; |
} |
static int radeon_tv_mode_valid(struct drm_connector *connector, |
struct drm_display_mode *mode) |
{ |
return MODE_OK; |
} |
static enum drm_connector_status radeon_tv_detect(struct drm_connector *connector) |
{ |
struct drm_encoder *encoder; |
struct drm_encoder_helper_funcs *encoder_funcs; |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
enum drm_connector_status ret = connector_status_disconnected; |
if (!radeon_connector->dac_load_detect) |
return ret; |
encoder = radeon_best_single_encoder(connector); |
if (!encoder) |
ret = connector_status_disconnected; |
else { |
encoder_funcs = encoder->helper_private; |
ret = encoder_funcs->detect(encoder, connector); |
} |
if (ret == connector_status_connected) |
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, false); |
radeon_connector_update_scratch_regs(connector, ret); |
return ret; |
} |
struct drm_connector_helper_funcs radeon_tv_connector_helper_funcs = { |
.get_modes = radeon_tv_get_modes, |
.mode_valid = radeon_tv_mode_valid, |
.best_encoder = radeon_best_single_encoder, |
}; |
struct drm_connector_funcs radeon_tv_connector_funcs = { |
.dpms = drm_helper_connector_dpms, |
.detect = radeon_tv_detect, |
.fill_modes = drm_helper_probe_single_connector_modes, |
.destroy = radeon_connector_destroy, |
.set_property = radeon_connector_set_property, |
}; |
static int radeon_dvi_get_modes(struct drm_connector *connector) |
{ |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
615,24 → 267,15 |
int ret; |
ret = radeon_ddc_get_modes(radeon_connector); |
/* reset scratch regs here since radeon_dvi_detect doesn't check digital bit */ |
radeon_connector_update_scratch_regs(connector, connector_status_connected); |
return ret; |
} |
/* |
* DVI is complicated |
* Do a DDC probe, if DDC probe passes, get the full EDID so |
* we can do analog/digital monitor detection at this point. |
* If the monitor is an analog monitor or we got no DDC, |
* we need to find the DAC encoder object for this connector. |
* If we got no DDC, we do load detection on the DAC encoder object. |
* If we got analog DDC or load detection passes on the DAC encoder |
* we have to check if this analog encoder is shared with anyone else (TV) |
* if its shared we have to set the other connector to disconnected. |
*/ |
static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connector) |
{ |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
struct drm_encoder *encoder = NULL; |
struct drm_encoder *encoder; |
struct drm_encoder_helper_funcs *encoder_funcs; |
struct drm_mode_object *obj; |
int i; |
642,29 → 285,9 |
radeon_i2c_do_lock(radeon_connector, 1); |
dret = radeon_ddc_probe(radeon_connector); |
radeon_i2c_do_lock(radeon_connector, 0); |
if (dret) { |
radeon_i2c_do_lock(radeon_connector, 1); |
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
radeon_i2c_do_lock(radeon_connector, 0); |
if (!radeon_connector->edid) { |
DRM_ERROR("DDC responded but not EDID found for %s\n", |
drm_get_connector_name(connector)); |
} else { |
radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); |
/* if this isn't a digital monitor |
then we need to make sure we don't have any |
TV conflicts */ |
if (dret) |
ret = connector_status_connected; |
} |
} |
if ((ret == connector_status_connected) && (radeon_connector->use_digital == true)) |
goto out; |
/* find analog encoder */ |
if (radeon_connector->dac_load_detect) { |
else { |
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
if (connector->encoder_ids[i] == 0) |
break; |
679,23 → 302,15 |
encoder_funcs = encoder->helper_private; |
if (encoder_funcs->detect) { |
if (ret != connector_status_connected) { |
ret = encoder_funcs->detect(encoder, connector); |
if (ret == connector_status_connected) { |
radeon_connector->use_digital = false; |
} |
} |
radeon_connector->use_digital = 0; |
break; |
} |
} |
} |
if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) && |
encoder) { |
ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true); |
} |
out: |
/* updated in get modes as well since we need to know if it's analog or digital */ |
radeon_connector_update_scratch_regs(connector, ret); |
return ret; |
709,6 → 324,9 |
struct drm_mode_object *obj; |
struct drm_encoder *encoder; |
int i; |
ENTRY(); |
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) { |
if (connector->encoder_ids[i] == 0) |
break; |
719,7 → 337,7 |
encoder = obj_to_encoder(obj); |
if (radeon_connector->use_digital == true) { |
if (radeon_connector->use_digital) { |
if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) |
return encoder; |
} else { |
766,7 → 384,6 |
bool linkb, |
uint32_t igp_lane_info) |
{ |
struct radeon_device *rdev = dev->dev_private; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector; |
struct radeon_connector_atom_dig *radeon_dig_connector; |
773,7 → 390,10 |
uint32_t subpixel_order = SubPixelNone; |
/* fixme - tv/cv/din */ |
if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
if ((connector_type == DRM_MODE_CONNECTOR_Unknown) || |
(connector_type == DRM_MODE_CONNECTOR_SVIDEO) || |
(connector_type == DRM_MODE_CONNECTOR_Composite) || |
(connector_type == DRM_MODE_CONNECTOR_9PinDIN)) |
return; |
/* see if we already added it */ |
802,9 → 422,6 |
if (!radeon_connector->ddc_bus) |
goto failed; |
} |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.load_detect_property, |
1); |
break; |
case DRM_MODE_CONNECTOR_DVIA: |
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
814,9 → 431,6 |
if (!radeon_connector->ddc_bus) |
goto failed; |
} |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.load_detect_property, |
1); |
break; |
case DRM_MODE_CONNECTOR_DVII: |
case DRM_MODE_CONNECTOR_DVID: |
834,12 → 448,6 |
goto failed; |
} |
subpixel_order = SubPixelHorizontalRGB; |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.coherent_mode_property, |
1); |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.load_detect_property, |
1); |
break; |
case DRM_MODE_CONNECTOR_HDMIA: |
case DRM_MODE_CONNECTOR_HDMIB: |
856,9 → 464,6 |
if (!radeon_connector->ddc_bus) |
goto failed; |
} |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.coherent_mode_property, |
1); |
subpixel_order = SubPixelHorizontalRGB; |
break; |
case DRM_MODE_CONNECTOR_DisplayPort: |
880,13 → 485,6 |
case DRM_MODE_CONNECTOR_SVIDEO: |
case DRM_MODE_CONNECTOR_Composite: |
case DRM_MODE_CONNECTOR_9PinDIN: |
if (radeon_tv == 1) { |
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
} |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.load_detect_property, |
1); |
break; |
case DRM_MODE_CONNECTOR_LVDS: |
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); |
902,10 → 500,6 |
if (!radeon_connector->ddc_bus) |
goto failed; |
} |
drm_mode_create_scaling_mode_property(dev); |
drm_connector_attach_property(&radeon_connector->base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_FULLSCREEN); |
subpixel_order = SubPixelHorizontalRGB; |
break; |
} |
928,13 → 522,15 |
int connector_type, |
struct radeon_i2c_bus_rec *i2c_bus) |
{ |
struct radeon_device *rdev = dev->dev_private; |
struct drm_connector *connector; |
struct radeon_connector *radeon_connector; |
uint32_t subpixel_order = SubPixelNone; |
/* fixme - tv/cv/din */ |
if (connector_type == DRM_MODE_CONNECTOR_Unknown) |
if ((connector_type == DRM_MODE_CONNECTOR_Unknown) || |
(connector_type == DRM_MODE_CONNECTOR_SVIDEO) || |
(connector_type == DRM_MODE_CONNECTOR_Composite) || |
(connector_type == DRM_MODE_CONNECTOR_9PinDIN)) |
return; |
/* see if we already added it */ |
963,9 → 559,6 |
if (!radeon_connector->ddc_bus) |
goto failed; |
} |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.load_detect_property, |
1); |
break; |
case DRM_MODE_CONNECTOR_DVIA: |
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); |
975,9 → 568,6 |
if (!radeon_connector->ddc_bus) |
goto failed; |
} |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.load_detect_property, |
1); |
break; |
case DRM_MODE_CONNECTOR_DVII: |
case DRM_MODE_CONNECTOR_DVID: |
987,9 → 577,6 |
radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); |
if (!radeon_connector->ddc_bus) |
goto failed; |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.load_detect_property, |
1); |
} |
subpixel_order = SubPixelHorizontalRGB; |
break; |
996,13 → 583,6 |
case DRM_MODE_CONNECTOR_SVIDEO: |
case DRM_MODE_CONNECTOR_Composite: |
case DRM_MODE_CONNECTOR_9PinDIN: |
if (radeon_tv == 1) { |
drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); |
drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); |
drm_connector_attach_property(&radeon_connector->base, |
rdev->mode_info.load_detect_property, |
1); |
} |
break; |
case DRM_MODE_CONNECTOR_LVDS: |
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); |
1012,9 → 592,6 |
if (!radeon_connector->ddc_bus) |
goto failed; |
} |
drm_connector_attach_property(&radeon_connector->base, |
dev->mode_config.scaling_mode_property, |
DRM_MODE_SCALE_FULLSCREEN); |
subpixel_order = SubPixelHorizontalRGB; |
break; |
} |
/drivers/video/drm/radeon/radeon_device.c |
---|
27,8 → 27,8 |
*/ |
//#include <linux/console.h> |
#include <drm/drmP.h> |
#include <drm/drm_crtc_helper.h> |
#include <drmP.h> |
#include <drm_crtc_helper.h> |
#include "radeon_drm.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
43,15 → 43,14 |
int radeon_gart_size = 512; /* default gart size */ |
int radeon_benchmarking = 0; |
int radeon_connector_table = 0; |
int radeon_tv = 1; |
/* |
* Clear GPU surface registers. |
*/ |
void radeon_surface_init(struct radeon_device *rdev) |
static void radeon_surface_init(struct radeon_device *rdev) |
{ |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
/* FIXME: check this out */ |
if (rdev->family < CHIP_R600) { |
62,8 → 61,6 |
i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO), |
0); |
} |
/* enable surfaces */ |
WREG32(RADEON_SURFACE_CNTL, 0); |
} |
} |
70,7 → 67,7 |
/* |
* GPU scratch registers helpers function. |
*/ |
void radeon_scratch_init(struct radeon_device *rdev) |
static void radeon_scratch_init(struct radeon_device *rdev) |
{ |
int i; |
135,7 → 132,7 |
if (rdev->mc.vram_location != 0xFFFFFFFFUL) { |
/* vram location was already setup try to put gtt after |
* if it fits */ |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size; |
tmp = rdev->mc.vram_location + rdev->mc.vram_size; |
tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) { |
rdev->mc.gtt_location = tmp; |
150,13 → 147,13 |
} else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) { |
/* gtt location was already setup try to put vram before |
* if it fits */ |
if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) { |
if (rdev->mc.vram_size < rdev->mc.gtt_location) { |
rdev->mc.vram_location = 0; |
} else { |
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size; |
tmp += (rdev->mc.mc_vram_size - 1); |
tmp &= ~(rdev->mc.mc_vram_size - 1); |
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) { |
tmp += (rdev->mc.vram_size - 1); |
tmp &= ~(rdev->mc.vram_size - 1); |
if ((0xFFFFFFFFUL - tmp) >= rdev->mc.vram_size) { |
rdev->mc.vram_location = tmp; |
} else { |
printk(KERN_ERR "[drm] vram too big to fit " |
166,22 → 163,16 |
} |
} else { |
rdev->mc.vram_location = 0; |
tmp = rdev->mc.mc_vram_size; |
tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1); |
rdev->mc.gtt_location = tmp; |
rdev->mc.gtt_location = rdev->mc.vram_size; |
} |
rdev->mc.vram_start = rdev->mc.vram_location; |
rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
rdev->mc.gtt_start = rdev->mc.gtt_location; |
rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20)); |
DRM_INFO("radeon: VRAM %uM\n", rdev->mc.vram_size >> 20); |
DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n", |
(unsigned)rdev->mc.vram_location, |
(unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1)); |
DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20)); |
rdev->mc.vram_location, |
rdev->mc.vram_location + rdev->mc.vram_size - 1); |
DRM_INFO("radeon: GTT %uM\n", rdev->mc.gtt_size >> 20); |
DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n", |
(unsigned)rdev->mc.gtt_location, |
(unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1)); |
rdev->mc.gtt_location, |
rdev->mc.gtt_location + rdev->mc.gtt_size - 1); |
return 0; |
} |
189,11 → 180,11 |
/* |
* GPU helpers function. |
*/ |
bool radeon_card_posted(struct radeon_device *rdev) |
static bool radeon_card_posted(struct radeon_device *rdev) |
{ |
uint32_t reg; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
/* first check CRTCs */ |
if (ASIC_IS_AVIVO(rdev)) { |
243,28 → 234,34 |
void radeon_register_accessor_init(struct radeon_device *rdev) |
{ |
dbgprintf("%s\n",__FUNCTION__); |
rdev->mm_rreg = &r100_mm_rreg; |
rdev->mm_wreg = &r100_mm_wreg; |
rdev->mc_rreg = &radeon_invalid_rreg; |
rdev->mc_wreg = &radeon_invalid_wreg; |
rdev->pll_rreg = &radeon_invalid_rreg; |
rdev->pll_wreg = &radeon_invalid_wreg; |
rdev->pcie_rreg = &radeon_invalid_rreg; |
rdev->pcie_wreg = &radeon_invalid_wreg; |
rdev->pciep_rreg = &radeon_invalid_rreg; |
rdev->pciep_wreg = &radeon_invalid_wreg; |
/* Don't change order as we are overridding accessor. */ |
if (rdev->family < CHIP_RV515) { |
rdev->pcie_reg_mask = 0xff; |
} else { |
rdev->pcie_reg_mask = 0x7ff; |
rdev->pcie_rreg = &rv370_pcie_rreg; |
rdev->pcie_wreg = &rv370_pcie_wreg; |
} |
if (rdev->family >= CHIP_RV515) { |
rdev->pcie_rreg = &rv515_pcie_rreg; |
rdev->pcie_wreg = &rv515_pcie_wreg; |
} |
/* FIXME: not sure here */ |
if (rdev->family <= CHIP_R580) { |
rdev->pll_rreg = &r100_pll_rreg; |
rdev->pll_wreg = &r100_pll_wreg; |
} |
if (rdev->family >= CHIP_R420) { |
rdev->mc_rreg = &r420_mc_rreg; |
rdev->mc_wreg = &r420_mc_wreg; |
} |
if (rdev->family >= CHIP_RV515) { |
rdev->mc_rreg = &rv515_mc_rreg; |
rdev->mc_wreg = &rv515_mc_wreg; |
273,19 → 270,19 |
rdev->mc_rreg = &rs400_mc_rreg; |
rdev->mc_wreg = &rs400_mc_wreg; |
} |
// if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { |
// rdev->mc_rreg = &rs690_mc_rreg; |
// rdev->mc_wreg = &rs690_mc_wreg; |
// } |
// if (rdev->family == CHIP_RS600) { |
// rdev->mc_rreg = &rs600_mc_rreg; |
// rdev->mc_wreg = &rs600_mc_wreg; |
// } |
// if (rdev->family >= CHIP_R600) { |
// rdev->pciep_rreg = &r600_pciep_rreg; |
// rdev->pciep_wreg = &r600_pciep_wreg; |
// } |
if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { |
rdev->mc_rreg = &rs690_mc_rreg; |
rdev->mc_wreg = &rs690_mc_wreg; |
} |
if (rdev->family == CHIP_RS600) { |
rdev->mc_rreg = &rs600_mc_rreg; |
rdev->mc_wreg = &rs600_mc_wreg; |
} |
if (rdev->family >= CHIP_R600) { |
rdev->pciep_rreg = &r600_pciep_rreg; |
rdev->pciep_wreg = &r600_pciep_wreg; |
} |
} |
/* |
293,6 → 290,9 |
*/ |
int radeon_asic_init(struct radeon_device *rdev) |
{ |
dbgprintf("%s\n",__FUNCTION__); |
radeon_register_accessor_init(rdev); |
switch (rdev->family) { |
case CHIP_R100: |
311,14 → 311,6 |
case CHIP_RV350: |
case CHIP_RV380: |
rdev->asic = &r300_asic; |
if (rdev->flags & RADEON_IS_PCIE) { |
rdev->asic->gart_init = &rv370_pcie_gart_init; |
rdev->asic->gart_fini = &rv370_pcie_gart_fini; |
rdev->asic->gart_enable = &rv370_pcie_gart_enable; |
rdev->asic->gart_disable = &rv370_pcie_gart_disable; |
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; |
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; |
} |
break; |
case CHIP_R420: |
case CHIP_R423: |
330,11 → 322,11 |
rdev->asic = &rs400_asic; |
break; |
case CHIP_RS600: |
// rdev->asic = &rs600_asic; |
rdev->asic = &rs600_asic; |
break; |
case CHIP_RS690: |
case CHIP_RS740: |
// rdev->asic = &rs690_asic; |
rdev->asic = &rs690_asic; |
break; |
case CHIP_RV515: |
rdev->asic = &rv515_asic; |
371,8 → 363,9 |
{ |
int r; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
radeon_get_clock_info(rdev->ddev); |
r = radeon_static_clocks_init(rdev->ddev); |
if (r) { |
return r; |
446,7 → 439,7 |
int radeon_atombios_init(struct radeon_device *rdev) |
{ |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
atom_card_info.dev = rdev->ddev; |
rdev->mode_info.atom_context = atom_parse(&atom_card_info, rdev->bios); |
472,7 → 465,6 |
int radeon_modeset_init(struct radeon_device *rdev); |
void radeon_modeset_fini(struct radeon_device *rdev); |
/* |
* Radeon device. |
*/ |
482,9 → 474,8 |
uint32_t flags) |
{ |
int r, ret; |
int dma_bits; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
DRM_INFO("radeon: Initializing kernel modesetting.\n"); |
rdev->shutdown = false; |
503,53 → 494,35 |
// mutex_init(&rdev->cp.mutex); |
// rwlock_init(&rdev->fence_drv.lock); |
/* Set asic functions */ |
r = radeon_asic_init(rdev); |
if (r) { |
return r; |
} |
if (radeon_agpmode == -1) { |
rdev->flags &= ~RADEON_IS_AGP; |
if (rdev->family >= CHIP_RV515 || |
if (rdev->family > CHIP_RV515 || |
rdev->family == CHIP_RV380 || |
rdev->family == CHIP_RV410 || |
rdev->family == CHIP_R423) { |
DRM_INFO("Forcing AGP to PCIE mode\n"); |
rdev->flags |= RADEON_IS_PCIE; |
rdev->asic->gart_init = &rv370_pcie_gart_init; |
rdev->asic->gart_fini = &rv370_pcie_gart_fini; |
rdev->asic->gart_enable = &rv370_pcie_gart_enable; |
rdev->asic->gart_disable = &rv370_pcie_gart_disable; |
rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush; |
rdev->asic->gart_set_page = &rv370_pcie_gart_set_page; |
} else { |
DRM_INFO("Forcing AGP to PCI mode\n"); |
rdev->flags |= RADEON_IS_PCI; |
rdev->asic->gart_init = &r100_pci_gart_init; |
rdev->asic->gart_fini = &r100_pci_gart_fini; |
rdev->asic->gart_enable = &r100_pci_gart_enable; |
rdev->asic->gart_disable = &r100_pci_gart_disable; |
rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush; |
rdev->asic->gart_set_page = &r100_pci_gart_set_page; |
} |
} |
/* set DMA mask + need_dma32 flags. |
* PCIE - can handle 40-bits. |
* IGP - can handle 40-bits (in theory) |
* AGP - generally dma32 is safest |
* PCI - only dma32 |
*/ |
rdev->need_dma32 = false; |
if (rdev->flags & RADEON_IS_AGP) |
rdev->need_dma32 = true; |
if (rdev->flags & RADEON_IS_PCI) |
rdev->need_dma32 = true; |
/* Set asic functions */ |
r = radeon_asic_init(rdev); |
if (r) { |
return r; |
} |
dma_bits = rdev->need_dma32 ? 32 : 40; |
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); |
r = rdev->asic->init(rdev); |
if (r) { |
return r; |
} |
/* Report DMA addressing limitation */ |
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(32)); |
if (r) { |
printk(KERN_WARNING "radeon: No suitable DMA available.\n"); |
} |
568,13 → 541,6 |
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base); |
DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size); |
rdev->new_init_path = false; |
r = radeon_init(rdev); |
if (r) { |
return r; |
} |
if (!rdev->new_init_path) { |
/* Setup errata flags */ |
radeon_errata(rdev); |
/* Initialize scratch registers */ |
582,6 → 548,7 |
/* Initialize surface registers */ |
radeon_surface_init(rdev); |
/* TODO: disable VGA need to use VGA request */ |
/* BIOS*/ |
if (!radeon_get_bios(rdev)) { |
if (ASIC_IS_AVIVO(rdev)) |
611,9 → 578,23 |
radeon_combios_asic_init(rdev->ddev); |
} |
} |
/* Get clock & vram information */ |
radeon_get_clock_info(rdev->ddev); |
/* Get vram informations */ |
radeon_vram_info(rdev); |
/* Device is severly broken if aper size > vram size. |
* for RN50/M6/M7 - Novell bug 204882 ? |
*/ |
if (rdev->mc.vram_size < rdev->mc.aper_size) { |
rdev->mc.aper_size = rdev->mc.vram_size; |
} |
/* Add an MTRR for the VRAM */ |
// rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size, |
// MTRR_TYPE_WRCOMB, 1); |
DRM_INFO("Detected VRAM RAM=%uM, BAR=%uM\n", |
rdev->mc.vram_size >> 20, |
(unsigned)rdev->mc.aper_size >> 20); |
DRM_INFO("RAM width %dbits %cDR\n", |
rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S'); |
/* Initialize clocks */ |
r = radeon_clocks_init(rdev); |
if (r) { |
625,51 → 606,74 |
if (r) { |
return r; |
} |
/* Fence driver */ |
// r = radeon_fence_driver_init(rdev); |
// if (r) { |
// return r; |
// } |
// r = radeon_irq_kms_init(rdev); |
// if (r) { |
// return r; |
// } |
/* Memory manager */ |
r = radeon_object_init(rdev); |
if (r) { |
return r; |
} |
r = radeon_gpu_gart_init(rdev); |
if (r) |
return r; |
/* Initialize GART (initialize after TTM so we can allocate |
* memory through TTM but finalize after TTM) */ |
r = radeon_gart_enable(rdev); |
if (r) |
return 0; |
if (!r) { |
r = radeon_gem_init(rdev); |
if (r) |
return 0; |
} |
/* 1M ring buffer */ |
// r = radeon_cp_init(rdev, 1024 * 1024); |
// if (r) |
// return 0; |
if (!r) { |
r = radeon_cp_init(rdev, 1024 * 1024); |
} |
// if (!r) { |
// r = radeon_wb_init(rdev); |
// if (r) { |
// DRM_ERROR("radeon: failled initializing WB (%d).\n", r); |
// return r; |
// } |
// } |
#if 0 |
r = radeon_wb_init(rdev); |
if (r) |
DRM_ERROR("radeon: failled initializing WB (%d).\n", r); |
if (!r) { |
r = radeon_ib_pool_init(rdev); |
if (r) |
return 0; |
if (r) { |
DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r); |
return r; |
} |
} |
if (!r) { |
r = radeon_ib_test(rdev); |
if (r) |
return 0; |
if (r) { |
DRM_ERROR("radeon: failled testing IB (%d).\n", r); |
return r; |
} |
} |
#endif |
rdev->accel_working = true; |
ret = r; |
r = radeon_modeset_init(rdev); |
if (r) { |
return r; |
} |
// if (rdev->fbdev_rfb && rdev->fbdev_rfb->obj) { |
// rdev->fbdev_robj = rdev->fbdev_rfb->obj->driver_private; |
// } |
if (!ret) { |
DRM_INFO("radeon: kernel modesetting successfully initialized.\n"); |
// if (radeon_testing) { |
// radeon_test_moves(rdev); |
// } |
// if (radeon_benchmarking) { |
} |
if (radeon_benchmarking) { |
// radeon_benchmark(rdev); |
// } |
return 0; |
} |
return ret; |
// return -1; |
} |
static struct pci_device_id pciidlist[] = { |
radeon_PCI_IDS |
676,7 → 680,7 |
}; |
u32_t drvEntry(int action, char *cmdline) |
u32_t __stdcall drvEntry(int action) |
{ |
struct pci_device_id *ent; |
693,9 → 697,6 |
return 0; |
} |
if(cmdline) |
dbgprintf("cmdline: %s\n", cmdline); |
enum_pci_devices(); |
ent = find_pci_device(&device, pciidlist); |
931,6 → 932,3 |
/drivers/video/drm/radeon/radeon_ring.c |
---|
25,7 → 25,7 |
* Alex Deucher |
* Jerome Glisse |
*/ |
#include <linux/seq_file.h> |
//#include <linux/seq_file.h> |
#include "drmP.h" |
#include "radeon_drm.h" |
#include "radeon_reg.h" |
32,6 → 32,7 |
#include "radeon.h" |
#include "atom.h" |
int radeon_debugfs_ib_init(struct radeon_device *rdev); |
/* |
59,12 → 60,10 |
set_bit(i, rdev->ib_pool.alloc_bm); |
rdev->ib_pool.ibs[i].length_dw = 0; |
*ib = &rdev->ib_pool.ibs[i]; |
mutex_unlock(&rdev->ib_pool.mutex); |
goto out; |
} |
if (list_empty(&rdev->ib_pool.scheduled_ibs)) { |
/* we go do nothings here */ |
mutex_unlock(&rdev->ib_pool.mutex); |
DRM_ERROR("all IB allocated none scheduled.\n"); |
r = -EINVAL; |
goto out; |
74,13 → 73,10 |
struct radeon_ib, list); |
if (nib->fence == NULL) { |
/* we go do nothings here */ |
mutex_unlock(&rdev->ib_pool.mutex); |
DRM_ERROR("IB %lu scheduled without a fence.\n", nib->idx); |
r = -EINVAL; |
goto out; |
} |
mutex_unlock(&rdev->ib_pool.mutex); |
r = radeon_fence_wait(nib->fence, false); |
if (r) { |
DRM_ERROR("radeon: IB(%lu:0x%016lX:%u)\n", nib->idx, |
89,17 → 85,12 |
goto out; |
} |
radeon_fence_unref(&nib->fence); |
nib->length_dw = 0; |
/* scheduled list is accessed here */ |
mutex_lock(&rdev->ib_pool.mutex); |
list_del(&nib->list); |
INIT_LIST_HEAD(&nib->list); |
mutex_unlock(&rdev->ib_pool.mutex); |
*ib = nib; |
out: |
mutex_unlock(&rdev->ib_pool.mutex); |
if (r) { |
radeon_fence_unref(&fence); |
} else { |
124,36 → 115,60 |
} |
list_del(&tmp->list); |
INIT_LIST_HEAD(&tmp->list); |
if (tmp->fence) |
if (tmp->fence) { |
radeon_fence_unref(&tmp->fence); |
} |
tmp->length_dw = 0; |
clear_bit(tmp->idx, rdev->ib_pool.alloc_bm); |
mutex_unlock(&rdev->ib_pool.mutex); |
} |
static void radeon_ib_align(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
while ((ib->length_dw & rdev->cp.align_mask)) { |
ib->ptr[ib->length_dw++] = PACKET2(0); |
} |
} |
static void radeon_ib_cpu_flush(struct radeon_device *rdev, |
struct radeon_ib *ib) |
{ |
unsigned long tmp; |
unsigned i; |
/* To force CPU cache flush ugly but seems reliable */ |
for (i = 0; i < ib->length_dw; i += (rdev->cp.align_mask + 1)) { |
tmp = readl(&ib->ptr[i]); |
} |
} |
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib) |
{ |
int r = 0; |
mutex_lock(&rdev->ib_pool.mutex); |
radeon_ib_align(rdev, ib); |
radeon_ib_cpu_flush(rdev, ib); |
if (!ib->length_dw || !rdev->cp.ready) { |
/* TODO: Nothings in the ib we should report. */ |
mutex_unlock(&rdev->ib_pool.mutex); |
DRM_ERROR("radeon: couldn't schedule IB(%lu).\n", ib->idx); |
return -EINVAL; |
} |
/* 64 dwords should be enough for fence too */ |
/* 64 dwords should be enought for fence too */ |
r = radeon_ring_lock(rdev, 64); |
if (r) { |
DRM_ERROR("radeon: scheduling IB failled (%d).\n", r); |
mutex_unlock(&rdev->ib_pool.mutex); |
return r; |
} |
radeon_ring_ib_execute(rdev, ib); |
radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1)); |
radeon_ring_write(rdev, ib->gpu_addr); |
radeon_ring_write(rdev, ib->length_dw); |
radeon_fence_emit(rdev, ib->fence); |
mutex_lock(&rdev->ib_pool.mutex); |
radeon_ring_unlock_commit(rdev); |
list_add_tail(&ib->list, &rdev->ib_pool.scheduled_ibs); |
mutex_unlock(&rdev->ib_pool.mutex); |
radeon_ring_unlock_commit(rdev); |
return 0; |
} |
#endif |
165,8 → 180,6 |
int i; |
int r = 0; |
if (rdev->ib_pool.robj) |
return 0; |
/* Allocate 1M object buffer */ |
INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs); |
r = radeon_object_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024, |
210,7 → 223,7 |
if (!rdev->ib_pool.ready) { |
return; |
} |
mutex_lock(&rdev->ib_pool.mutex); |
// mutex_lock(&rdev->ib_pool.mutex); |
bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE); |
if (rdev->ib_pool.robj) { |
// radeon_object_kunmap(rdev->ib_pool.robj); |
217,18 → 230,74 |
// radeon_object_unref(&rdev->ib_pool.robj); |
rdev->ib_pool.robj = NULL; |
} |
mutex_unlock(&rdev->ib_pool.mutex); |
// mutex_unlock(&rdev->ib_pool.mutex); |
} |
#if 0 |
int radeon_ib_test(struct radeon_device *rdev) |
{ |
struct radeon_ib *ib; |
uint32_t scratch; |
uint32_t tmp = 0; |
unsigned i; |
int r; |
r = radeon_scratch_get(rdev, &scratch); |
if (r) { |
DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r); |
return r; |
} |
WREG32(scratch, 0xCAFEDEAD); |
r = radeon_ib_get(rdev, &ib); |
if (r) { |
return r; |
} |
ib->ptr[0] = PACKET0(scratch, 0); |
ib->ptr[1] = 0xDEADBEEF; |
ib->ptr[2] = PACKET2(0); |
ib->ptr[3] = PACKET2(0); |
ib->ptr[4] = PACKET2(0); |
ib->ptr[5] = PACKET2(0); |
ib->ptr[6] = PACKET2(0); |
ib->ptr[7] = PACKET2(0); |
ib->length_dw = 8; |
r = radeon_ib_schedule(rdev, ib); |
if (r) { |
radeon_scratch_free(rdev, scratch); |
radeon_ib_free(rdev, &ib); |
return r; |
} |
r = radeon_fence_wait(ib->fence, false); |
if (r) { |
return r; |
} |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32(scratch); |
if (tmp == 0xDEADBEEF) { |
break; |
} |
DRM_UDELAY(1); |
} |
if (i < rdev->usec_timeout) { |
DRM_INFO("ib test succeeded in %u usecs\n", i); |
} else { |
DRM_ERROR("radeon: ib test failed (sracth(0x%04X)=0x%08X)\n", |
scratch, tmp); |
r = -EINVAL; |
} |
radeon_scratch_free(rdev, scratch); |
radeon_ib_free(rdev, &ib); |
return r; |
} |
#endif |
/* |
* Ring. |
*/ |
void radeon_ring_free_size(struct radeon_device *rdev) |
{ |
if (rdev->family >= CHIP_R600) |
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR); |
else |
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR); |
/* This works because ring_size is a power of 2 */ |
rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4)); |
246,12 → 315,14 |
/* Align requested size with padding so unlock_commit can |
* pad safely */ |
ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask; |
mutex_lock(&rdev->cp.mutex); |
// mutex_lock(&rdev->cp.mutex); |
while (ndw > (rdev->cp.ring_free_dw - 1)) { |
radeon_ring_free_size(rdev); |
if (ndw < rdev->cp.ring_free_dw) { |
break; |
} |
delay(1); |
// r = radeon_fence_wait_next(rdev); |
// if (r) { |
// mutex_unlock(&rdev->cp.mutex); |
272,26 → 343,79 |
count_dw_pad = (rdev->cp.align_mask + 1) - |
(rdev->cp.wptr & rdev->cp.align_mask); |
for (i = 0; i < count_dw_pad; i++) { |
radeon_ring_write(rdev, 2 << 30); |
radeon_ring_write(rdev, PACKET2(0)); |
} |
DRM_MEMORYBARRIER(); |
radeon_cp_commit(rdev); |
mutex_unlock(&rdev->cp.mutex); |
WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr); |
(void)RREG32(RADEON_CP_RB_WPTR); |
// mutex_unlock(&rdev->cp.mutex); |
} |
/* Abort a pending ring submission: roll the write pointer back to the
 * value saved when the ring was locked, discarding any words written
 * since radeon_ring_lock().
 * NOTE(review): both an active and a commented-out mutex_unlock appear
 * below — this looks like merged old/new diff lines; confirm which
 * locking scheme this port actually uses. */
void radeon_ring_unlock_undo(struct radeon_device *rdev) |
{ |
rdev->cp.wptr = rdev->cp.wptr_old; |
mutex_unlock(&rdev->cp.mutex); |
// mutex_unlock(&rdev->cp.mutex); |
} |
int radeon_ring_test(struct radeon_device *rdev) |
{ |
uint32_t scratch; |
uint32_t tmp = 0; |
unsigned i; |
int r; |
dbgprintf("%s\n",__FUNCTION__); |
r = radeon_scratch_get(rdev, &scratch); |
if (r) { |
DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r); |
return r; |
} |
WREG32(scratch, 0xCAFEDEAD); |
r = radeon_ring_lock(rdev, 2); |
if (r) { |
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r); |
radeon_scratch_free(rdev, scratch); |
return r; |
} |
radeon_ring_write(rdev, PACKET0(scratch, 0)); |
radeon_ring_write(rdev, 0xDEADBEEF); |
radeon_ring_unlock_commit(rdev); |
for (i = 0; i < 100000; i++) { |
tmp = RREG32(scratch); |
if (tmp == 0xDEADBEEF) { |
break; |
} |
DRM_UDELAY(1); |
} |
if (i < 100000) { |
DRM_INFO("ring test succeeded in %d usecs\n", i); |
} else { |
DRM_ERROR("radeon: ring test failed (sracth(0x%04X)=0x%08X)\n", |
scratch, tmp); |
r = -EINVAL; |
} |
radeon_scratch_free(rdev, scratch); |
dbgprintf("done %s\n",__FUNCTION__); |
return r; |
} |
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset, |
int pages, u32_t *pagelist); |
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size) |
{ |
int r; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
rdev->cp.ring_size = ring_size; |
/* Allocate ring buffer */ |
if (rdev->cp.ring_obj == NULL) { |
r = radeon_object_create(rdev, NULL, rdev->cp.ring_size, |
301,7 → 425,7 |
&rdev->cp.ring_obj); |
if (r) { |
DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r); |
mutex_unlock(&rdev->cp.mutex); |
// mutex_unlock(&rdev->cp.mutex); |
return r; |
} |
r = radeon_object_pin(rdev->cp.ring_obj, |
309,7 → 433,7 |
&rdev->cp.gpu_addr); |
if (r) { |
DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r); |
mutex_unlock(&rdev->cp.mutex); |
// mutex_unlock(&rdev->cp.mutex); |
return r; |
} |
r = radeon_object_kmap(rdev->cp.ring_obj, |
316,7 → 440,7 |
(void **)&rdev->cp.ring); |
if (r) { |
DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r); |
mutex_unlock(&rdev->cp.mutex); |
// mutex_unlock(&rdev->cp.mutex); |
return r; |
} |
} |
337,7 → 461,7 |
rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1; |
rdev->cp.ring_free_dw = rdev->cp.ring_size / 4; |
LEAVE(); |
dbgprintf("done %s\n",__FUNCTION__); |
return 0; |
} |
344,7 → 468,7 |
void radeon_ring_fini(struct radeon_device *rdev) |
{ |
mutex_lock(&rdev->cp.mutex); |
// mutex_lock(&rdev->cp.mutex); |
if (rdev->cp.ring_obj) { |
// radeon_object_kunmap(rdev->cp.ring_obj); |
// radeon_object_unpin(rdev->cp.ring_obj); |
352,7 → 476,7 |
rdev->cp.ring = NULL; |
rdev->cp.ring_obj = NULL; |
} |
mutex_unlock(&rdev->cp.mutex); |
// mutex_unlock(&rdev->cp.mutex); |
} |
400,3 → 524,18 |
return 0; |
#endif |
} |
/*
 * drm_order() - binary order of a block size.
 *
 * Returns the smallest integer k such that (1UL << k) >= size, i.e.
 * ceil(log2(size)). drm_order(0) and drm_order(1) both yield 0.
 */
int drm_order(unsigned long size)
{
    unsigned long remaining = size;
    int order = 0;

    /* floor(log2(size)): count how often size can be halved. */
    while (remaining > 1) {
        remaining >>= 1;
        order++;
    }

    /* Not a power of two: round the order up. */
    if (size & (size - 1))
        order++;

    return order;
}
/drivers/video/drm/radeon/rs400.c |
---|
25,8 → 25,8 |
* Alex Deucher |
* Jerome Glisse |
*/ |
#include <linux/seq_file.h> |
#include <drm/drmP.h> |
//#include <linux/seq_file.h> |
#include <drmP.h> |
#include "radeon_reg.h" |
#include "radeon.h" |
62,7 → 62,7 |
break; |
default: |
DRM_ERROR("Unable to use IGP GART size %uM\n", |
(unsigned)(rdev->mc.gtt_size >> 20)); |
rdev->mc.gtt_size >> 20); |
DRM_ERROR("Valid GART size for IGP are 32M,64M,128M,256M,512M,1G,2G\n"); |
DRM_ERROR("Forcing to 32M GART size\n"); |
rdev->mc.gtt_size = 32 * 1024 * 1024; |
92,42 → 92,21 |
WREG32_MC(RS480_GART_CACHE_CNTRL, 0); |
} |
int rs400_gart_init(struct radeon_device *rdev) |
int rs400_gart_enable(struct radeon_device *rdev) |
{ |
uint32_t size_reg; |
uint32_t tmp; |
int r; |
if (rdev->gart.table.ram.ptr) { |
WARN(1, "RS400 GART already initialized.\n"); |
return 0; |
} |
/* Check gart size */ |
switch(rdev->mc.gtt_size / (1024 * 1024)) { |
case 32: |
case 64: |
case 128: |
case 256: |
case 512: |
case 1024: |
case 2048: |
break; |
default: |
return -EINVAL; |
} |
/* Initialize common gart structure */ |
r = radeon_gart_init(rdev); |
if (r) |
if (r) { |
return r; |
if (rs400_debugfs_pcie_gart_info_init(rdev)) |
} |
if (rs400_debugfs_pcie_gart_info_init(rdev)) { |
DRM_ERROR("Failed to register debugfs file for RS400 GART !\n"); |
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
return radeon_gart_table_ram_alloc(rdev); |
} |
int rs400_gart_enable(struct radeon_device *rdev) |
{ |
uint32_t size_reg; |
uint32_t tmp; |
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH); |
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS; |
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp); |
157,6 → 136,13 |
default: |
return -EINVAL; |
} |
if (rdev->gart.table.ram.ptr == NULL) { |
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
r = radeon_gart_table_ram_alloc(rdev); |
if (r) { |
return r; |
} |
} |
/* It should be fine to program it to max value */ |
if (rdev->family == CHIP_RS690 || (rdev->family == CHIP_RS740)) { |
WREG32_MC(RS690_MCCFG_AGP_BASE, 0xFFFFFFFF); |
178,9 → 164,7 |
WREG32(RADEON_BUS_CNTL, tmp); |
} |
/* Table should be in 32bits address space so ignore bits above. */ |
tmp = (u32)rdev->gart.table_addr & 0xfffff000; |
tmp |= (upper_32_bits(rdev->gart.table_addr) & 0xff) << 4; |
tmp = rdev->gart.table_addr & 0xfffff000; |
WREG32_MC(RS480_GART_BASE, tmp); |
/* TODO: more tweaking here */ |
WREG32_MC(RS480_GART_FEATURE_ID, |
215,26 → 199,12 |
WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, 0); |
} |
void rs400_gart_fini(struct radeon_device *rdev) |
{ |
rs400_gart_disable(rdev); |
radeon_gart_table_ram_free(rdev); |
radeon_gart_fini(rdev); |
} |
/* Write one GART page-table entry mapping page index i to bus address
 * addr on RS400-class IGPs.
 * @i:    page index into the RAM-resident GART table (range-checked).
 * @addr: bus address of the backing page.
 * Returns 0 on success, -EINVAL if i is out of range.
 * NOTE(review): the first 'entry' computation below is immediately
 * overwritten by the second ptr[i] assignment — this span appears to
 * contain both the old and new diff variants merged together; only one
 * of the two store sequences can be intended. Confirm against the real
 * tree before relying on this code. */
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr) |
{ |
uint32_t entry; |
if (i < 0 || i > rdev->gart.num_gpu_pages) { |
return -EINVAL; |
} |
entry = (lower_32_bits(addr) & PAGE_MASK) | |
((upper_32_bits(addr) & 0xff) << 4) | |
0xc; |
entry = cpu_to_le32(entry); |
rdev->gart.table.ram.ptr[i] = entry; |
rdev->gart.table.ram.ptr[i] = cpu_to_le32(((uint32_t)addr) | 0xC); |
return 0; |
} |
253,9 → 223,10 |
rs400_gpu_init(rdev); |
rs400_gart_disable(rdev); |
rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
rdev->mc.gtt_location = rdev->mc.vram_size; |
rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); |
rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
r = radeon_mc_setup(rdev); |
if (r) { |
return r; |
267,7 → 238,7 |
"programming pipes. Bad things might happen.\n"); |
} |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; |
tmp = REG_SET(RADEON_MC_FB_TOP, tmp >> 16); |
tmp |= REG_SET(RADEON_MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32(RADEON_MC_FB_LOCATION, tmp); |
276,12 → 247,14 |
(void)RREG32(RADEON_HOST_PATH_CNTL); |
WREG32(RADEON_HOST_PATH_CNTL, tmp); |
(void)RREG32(RADEON_HOST_PATH_CNTL); |
return 0; |
} |
/* Tear down the RS400 memory controller GART state: disable the GART,
 * free the RAM-resident page table, then release the common GART
 * structures. Order matters — hardware is quiesced before memory is
 * freed. */
void rs400_mc_fini(struct radeon_device *rdev) |
{ |
rs400_gart_disable(rdev); |
radeon_gart_table_ram_free(rdev); |
radeon_gart_fini(rdev); |
} |
311,12 → 284,21 |
*/ |
/* Probe VRAM parameters for RS400-class IGPs: adjust the GART size,
 * record memory type/width, and size the stolen-memory aperture.
 * NOTE(review): this span calls r100_vram_init_sizes() AND then derives
 * vram_size from RADEON_NB_TOM — these look like the old and new diff
 * variants merged; upstream uses one or the other. Verify before use. */
void rs400_vram_info(struct radeon_device *rdev) |
{ |
uint32_t tom; |
rs400_gart_adjust_size(rdev); |
/* DDR for all card after R300 & IGP */ |
rdev->mc.vram_is_ddr = true; |
rdev->mc.vram_width = 128; |
r100_vram_init_sizes(rdev); |
/* read NB_TOM to get the amount of ram stolen for the GPU */ |
tom = RREG32(RADEON_NB_TOM); |
rdev->mc.vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16); |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
/* Could aper size report 0 ? */ |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
} |
/drivers/video/drm/radeon/rs600.c |
---|
28,10 → 28,7 |
#include "drmP.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "avivod.h" |
#include "rs600_reg_safe.h" |
/* rs600 depends on : */ |
void r100_hdp_reset(struct radeon_device *rdev); |
int r100_gui_wait_for_idle(struct radeon_device *rdev); |
226,7 → 223,7 |
printk(KERN_WARNING "Failed to wait MC idle while " |
"programming pipes. Bad things might happen.\n"); |
} |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; |
tmp = REG_SET(RS600_MC_FB_TOP, tmp >> 16); |
tmp |= REG_SET(RS600_MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32_MC(RS600_MC_FB_LOCATION, tmp); |
304,12 → 301,7 |
rdev->mc.vram_width = 128; |
} |
/* Display-watermark programming hook for RS600 — intentionally a stub;
 * see the FIXME below. Kept so the asic function table stays uniform. */
void rs600_bandwidth_update(struct radeon_device *rdev) |
{ |
/* FIXME: implement, should this be like rs690 ? */ |
} |
/* |
* Indirect registers accessor |
*/ |
/drivers/video/drm/radeon/rs690.c |
---|
28,9 → 28,6 |
#include "drmP.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "rs690r.h" |
#include "atom.h" |
#include "atom-bits.h" |
/* rs690,rs740 depends on : */ |
void r100_hdp_reset(struct radeon_device *rdev); |
67,7 → 64,7 |
rs400_gart_disable(rdev); |
/* Setup GPU memory space */ |
rdev->mc.gtt_location = rdev->mc.mc_vram_size; |
rdev->mc.gtt_location = rdev->mc.vram_size; |
rdev->mc.gtt_location += (rdev->mc.gtt_size - 1); |
rdev->mc.gtt_location &= ~(rdev->mc.gtt_size - 1); |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
82,7 → 79,7 |
printk(KERN_WARNING "Failed to wait MC idle while " |
"programming pipes. Bad things might happen.\n"); |
} |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; |
tmp = REG_SET(RS690_MC_FB_TOP, tmp >> 16); |
tmp |= REG_SET(RS690_MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32_MC(RS690_MCCFG_FB_LOCATION, tmp); |
94,6 → 91,9 |
/* Tear down RS690 MC/GART state. RS690 shares the RS400 GART block, so
 * disable via rs400_gart_disable() before freeing the RAM page table
 * and the common GART structures. */
void rs690_mc_fini(struct radeon_device *rdev) |
{ |
rs400_gart_disable(rdev); |
radeon_gart_table_ram_free(rdev); |
radeon_gart_fini(rdev); |
} |
138,82 → 138,9 |
/* |
* VRAM info. |
*/ |
/* Read integrated-system power/clock data from the ATOM BIOS
 * (IntegratedSystemInfo table, content rev 1 or 2) into rdev->pm, then
 * derive the K8/HT/sideport memory bandwidths used by the watermark
 * code. Falls back to conservative defaults when the table revision is
 * unknown. All arithmetic is 20.12 fixed point (fixed20_12). */
void rs690_pm_info(struct radeon_device *rdev) |
{ |
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); |
struct _ATOM_INTEGRATED_SYSTEM_INFO *info; |
struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2; |
void *ptr; |
uint16_t data_offset; |
uint8_t frev, crev; |
fixed20_12 tmp; |
atom_parse_data_header(rdev->mode_info.atom_context, index, NULL, |
&frev, &crev, &data_offset); |
ptr = rdev->mode_info.atom_context->bios + data_offset; |
info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr; |
info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr; |
/* Get various system informations from bios */ |
switch (crev) { |
case 1: |
/* v1 table: clocks stored in 10 kHz units, hence the /100. */ |
tmp.full = rfixed_const(100); |
rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock); |
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock)); |
rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock)); |
rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth); |
break; |
case 2: |
tmp.full = rfixed_const(100); |
rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock); |
rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp); |
rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock); |
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); |
rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq); |
rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp); |
rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth)); |
break; |
default: |
tmp.full = rfixed_const(100); |
/* We assume the slower possible clock ie worst case */ |
/* DDR 333Mhz */ |
rdev->pm.igp_sideport_mclk.full = rfixed_const(333); |
/* FIXME: system clock ? */ |
rdev->pm.igp_system_mclk.full = rfixed_const(100); |
rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp); |
rdev->pm.igp_ht_link_clk.full = rfixed_const(200); |
rdev->pm.igp_ht_link_width.full = rfixed_const(8); |
DRM_ERROR("No integrated system info for your GPU, using safe default\n"); |
break; |
} |
/* Compute various bandwidth */ |
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */ |
tmp.full = rfixed_const(4); |
rdev->pm.k8_bandwidth.full = rfixed_mul(rdev->pm.igp_system_mclk, tmp); |
/* ht_bandwidth = ht_clk * 2 * ht_width / 8 * 0.8 |
* = ht_clk * ht_width / 5 |
*/ |
tmp.full = rfixed_const(5); |
rdev->pm.ht_bandwidth.full = rfixed_mul(rdev->pm.igp_ht_link_clk, |
rdev->pm.igp_ht_link_width); |
rdev->pm.ht_bandwidth.full = rfixed_div(rdev->pm.ht_bandwidth, tmp); |
/* NOTE(review): this compares tmp (the constant 5), not the computed
 * ht_bandwidth, against max_bandwidth — looks suspicious; confirm
 * against the upstream kernel driver. */ |
if (tmp.full < rdev->pm.max_bandwidth.full) { |
/* HT link is a limiting factor */ |
rdev->pm.max_bandwidth.full = tmp.full; |
} |
/* sideport_bandwidth = (sideport_clk / 2) * 2 * 2 * 0.7 |
* = (sideport_clk * 14) / 10 |
*/ |
tmp.full = rfixed_const(14); |
rdev->pm.sideport_bandwidth.full = rfixed_mul(rdev->pm.igp_sideport_mclk, tmp); |
tmp.full = rfixed_const(10); |
rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp); |
} |
void rs690_vram_info(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
fixed20_12 a; |
rs400_gart_adjust_size(rdev); |
/* DDR for all card after R300 & IGP */ |
225,411 → 152,14 |
} else { |
rdev->mc.vram_width = 64; |
} |
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
rs690_pm_info(rdev); |
/* FIXME: we should enforce default clock in case GPU is not in |
* default setup |
*/ |
a.full = rfixed_const(100); |
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); |
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); |
a.full = rfixed_const(16); |
/* core_bandwidth = sclk(Mhz) * 16 */ |
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a); |
} |
/* Partition the single shared display line buffer between the two CRTC
 * controllers according to which modes are active and their widths.
 * @mode1/@mode2: active mode of CRTC 0/1, or NULL when that CRTC is off.
 * Picks one of the preset split ratios (see the register description
 * below) and writes it to DC_LB_MEMORY_SPLIT. */
void rs690_line_buffer_adjust(struct radeon_device *rdev, |
struct drm_display_mode *mode1, |
struct drm_display_mode *mode2) |
{ |
u32 tmp; |
/* |
* Line Buffer Setup |
* There is a single line buffer shared by both display controllers. |
* DC_LB_MEMORY_SPLIT controls how that line buffer is shared between |
* the display controllers. The paritioning can either be done |
* manually or via one of four preset allocations specified in bits 1:0: |
* 0 - line buffer is divided in half and shared between crtc |
* 1 - D1 gets 3/4 of the line buffer, D2 gets 1/4 |
* 2 - D1 gets the whole buffer |
* 3 - D1 gets 1/4 of the line buffer, D2 gets 3/4 |
* Setting bit 2 of DC_LB_MEMORY_SPLIT controls switches to manual |
* allocation mode. In manual allocation mode, D1 always starts at 0, |
* D1 end/2 is specified in bits 14:4; D2 allocation follows D1. |
*/ |
tmp = RREG32(DC_LB_MEMORY_SPLIT) & ~DC_LB_MEMORY_SPLIT_MASK; |
tmp &= ~DC_LB_MEMORY_SPLIT_SHIFT_MODE; |
/* auto */ |
if (mode1 && mode2) { |
/* Both heads active: give the wider display the larger share
 * once it exceeds 2560 pixels, otherwise split evenly. */ |
if (mode1->hdisplay > mode2->hdisplay) { |
if (mode1->hdisplay > 2560) |
tmp |= DC_LB_MEMORY_SPLIT_D1_3Q_D2_1Q; |
else |
tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
} else if (mode2->hdisplay > mode1->hdisplay) { |
if (mode2->hdisplay > 2560) |
tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
else |
tmp |= DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
} else |
tmp |= AVIVO_DC_LB_MEMORY_SPLIT_D1HALF_D2HALF; |
} else if (mode1) { |
tmp |= DC_LB_MEMORY_SPLIT_D1_ONLY; |
} else if (mode2) { |
tmp |= DC_LB_MEMORY_SPLIT_D1_1Q_D2_3Q; |
} |
WREG32(DC_LB_MEMORY_SPLIT, tmp); |
} |
/* Per-CRTC display watermark parameters computed by
 * rs690_crtc_bandwidth_compute() and consumed by
 * rs690_bandwidth_update(). All fixed20_12 fields are 20.12 fixed
 * point. */
struct rs690_watermark { |
u32 lb_request_fifo_depth; /* chunk requests the line buffer may queue */ |
fixed20_12 num_line_pair; /* line pairs requested (1 = 2 lines) */ |
fixed20_12 estimated_width; |
fixed20_12 worst_case_latency; /* worst-case urgent-to-data latency */ |
fixed20_12 consumption_rate; /* pixel data consumption rate */ |
fixed20_12 active_time; /* active portion of one scanline */ |
fixed20_12 dbpp; /* display bits per pixel */ |
fixed20_12 priority_mark_max; |
fixed20_12 priority_mark; |
fixed20_12 sclk; |
}; |
/* Compute the display watermark parameters for one CRTC and fill @wm.
 * Also clamps rdev->pm.max_bandwidth to the limiting memory path
 * (core vs. K8/HT/sideport) and derives rdev->pm.sclk from it as a
 * side effect. All math is 20.12 fixed point; the exact statement
 * order matters because temporaries a/b/c are reused throughout.
 * For a disabled CRTC only a minimal FIFO depth is set. */
void rs690_crtc_bandwidth_compute(struct radeon_device *rdev, |
struct radeon_crtc *crtc, |
struct rs690_watermark *wm) |
{ |
struct drm_display_mode *mode = &crtc->base.mode; |
fixed20_12 a, b, c; |
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; |
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; |
/* FIXME: detect IGP with sideport memory, i don't think there is any |
* such product available |
*/ |
bool sideport = false; |
if (!crtc->base.enabled) { |
/* FIXME: wouldn't it better to set priority mark to maximum */ |
wm->lb_request_fifo_depth = 4; |
return; |
} |
/* Two line pairs when vertically downscaling by more than 2. */ |
if (crtc->vsc.full > rfixed_const(2)) |
wm->num_line_pair.full = rfixed_const(2); |
else |
wm->num_line_pair.full = rfixed_const(1); |
/* FIFO depth = hdisplay * line_pairs / 256, floored at 4. */ |
b.full = rfixed_const(mode->crtc_hdisplay); |
c.full = rfixed_const(256); |
a.full = rfixed_mul(wm->num_line_pair, b); |
request_fifo_depth.full = rfixed_div(a, c); |
if (a.full < rfixed_const(4)) { |
wm->lb_request_fifo_depth = 4; |
} else { |
wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); |
} |
/* Determine consumption rate |
* pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) |
* vtaps = number of vertical taps, |
* vsc = vertical scaling ratio, defined as source/destination |
* hsc = horizontal scaling ration, defined as source/destination |
*/ |
a.full = rfixed_const(mode->clock); |
b.full = rfixed_const(1000); |
a.full = rfixed_div(a, b); |
pclk.full = rfixed_div(b, a); |
if (crtc->rmx_type != RMX_OFF) { |
/* Scaling active: shorten the effective consumption period. */ |
b.full = rfixed_const(2); |
if (crtc->vsc.full > b.full) |
b.full = crtc->vsc.full; |
b.full = rfixed_mul(b, crtc->hsc); |
c.full = rfixed_const(2); |
b.full = rfixed_div(b, c); |
consumption_time.full = rfixed_div(pclk, b); |
} else { |
consumption_time.full = pclk.full; |
} |
a.full = rfixed_const(1); |
wm->consumption_rate.full = rfixed_div(a, consumption_time); |
/* Determine line time |
* LineTime = total time for one line of displayhtotal |
* LineTime = total number of horizontal pixels |
* pclk = pixel clock period(ns) |
*/ |
a.full = rfixed_const(crtc->base.mode.crtc_htotal); |
line_time.full = rfixed_mul(a, pclk); |
/* Determine active time |
* ActiveTime = time of active region of display within one line, |
* hactive = total number of horizontal active pixels |
* htotal = total number of horizontal pixels |
*/ |
a.full = rfixed_const(crtc->base.mode.crtc_htotal); |
b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); |
wm->active_time.full = rfixed_mul(line_time, b); |
wm->active_time.full = rfixed_div(wm->active_time, a); |
/* Maximun bandwidth is the minimun bandwidth of all component */ |
rdev->pm.max_bandwidth = rdev->pm.core_bandwidth; |
if (sideport) { |
if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full && |
rdev->pm.sideport_bandwidth.full) |
rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth; |
read_delay_latency.full = rfixed_const(370 * 800 * 1000); |
read_delay_latency.full = rfixed_div(read_delay_latency, |
rdev->pm.igp_sideport_mclk); |
} else { |
if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && |
rdev->pm.k8_bandwidth.full) |
rdev->pm.max_bandwidth = rdev->pm.k8_bandwidth; |
if (rdev->pm.max_bandwidth.full > rdev->pm.ht_bandwidth.full && |
rdev->pm.ht_bandwidth.full) |
rdev->pm.max_bandwidth = rdev->pm.ht_bandwidth; |
read_delay_latency.full = rfixed_const(5000); |
} |
/* sclk = system clocks(ns) = 1000 / max_bandwidth / 16 */ |
a.full = rfixed_const(16); |
rdev->pm.sclk.full = rfixed_mul(rdev->pm.max_bandwidth, a); |
a.full = rfixed_const(1000); |
rdev->pm.sclk.full = rfixed_div(a, rdev->pm.sclk); |
/* Determine chunk time |
* ChunkTime = the time it takes the DCP to send one chunk of data |
* to the LB which consists of pipeline delay and inter chunk gap |
* sclk = system clock(ns) |
*/ |
a.full = rfixed_const(256 * 13); |
chunk_time.full = rfixed_mul(rdev->pm.sclk, a); |
a.full = rfixed_const(10); |
chunk_time.full = rfixed_div(chunk_time, a); |
/* Determine the worst case latency |
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) |
* WorstCaseLatency = worst case time from urgent to when the MC starts |
* to return data |
* READ_DELAY_IDLE_MAX = constant of 1us |
* ChunkTime = time it takes the DCP to send one chunk of data to the LB |
* which consists of pipeline delay and inter chunk gap |
*/ |
if (rfixed_trunc(wm->num_line_pair) > 1) { |
a.full = rfixed_const(3); |
wm->worst_case_latency.full = rfixed_mul(a, chunk_time); |
wm->worst_case_latency.full += read_delay_latency.full; |
} else { |
a.full = rfixed_const(2); |
wm->worst_case_latency.full = rfixed_mul(a, chunk_time); |
wm->worst_case_latency.full += read_delay_latency.full; |
} |
/* Determine the tolerable latency |
* TolerableLatency = Any given request has only 1 line time |
* for the data to be returned |
* LBRequestFifoDepth = Number of chunk requests the LB can |
* put into the request FIFO for a display |
* LineTime = total time for one line of display |
* ChunkTime = the time it takes the DCP to send one chunk |
* of data to the LB which consists of |
* pipeline delay and inter chunk gap |
*/ |
if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { |
tolerable_latency.full = line_time.full; |
} else { |
tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); |
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; |
tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); |
tolerable_latency.full = line_time.full - tolerable_latency.full; |
} |
/* We assume worst case 32bits (4 bytes) */ |
wm->dbpp.full = rfixed_const(4 * 8); |
/* Determine the maximum priority mark |
* width = viewport width in pixels |
*/ |
a.full = rfixed_const(16); |
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); |
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); |
/* Determine estimated width */ |
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; |
estimated_width.full = rfixed_div(estimated_width, consumption_time); |
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { |
wm->priority_mark.full = rfixed_const(10); |
} else { |
a.full = rfixed_const(16); |
wm->priority_mark.full = rfixed_div(estimated_width, a); |
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; |
} |
} |
/* Program display priority/watermark registers for both CRTCs.
 * Computes per-CRTC watermarks via rs690_crtc_bandwidth_compute(),
 * sets the LB request FIFO depths, then derives a priority mark for
 * each active head from fill rate vs. consumption rate and writes it
 * to the D1/D2 MODE_PRIORITY registers (MODE_PRIORITY_OFF for a
 * disabled head). The three branches (both heads / head 0 only /
 * head 1 only) repeat the same fixed-point formula per head. */
void rs690_bandwidth_update(struct radeon_device *rdev) |
{ |
struct drm_display_mode *mode0 = NULL; |
struct drm_display_mode *mode1 = NULL; |
struct rs690_watermark wm0; |
struct rs690_watermark wm1; |
u32 tmp; |
fixed20_12 priority_mark02, priority_mark12, fill_rate; |
fixed20_12 a, b; |
if (rdev->mode_info.crtcs[0]->base.enabled) |
mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
if (rdev->mode_info.crtcs[1]->base.enabled) |
mode1 = &rdev->mode_info.crtcs[1]->base.mode; |
/* |
* Set display0/1 priority up in the memory controller for |
* modes if the user specifies HIGH for displaypriority |
* option. |
*/ |
if (rdev->disp_priority == 2) { |
tmp = RREG32_MC(MC_INIT_MISC_LAT_TIMER); |
tmp &= ~MC_DISP1R_INIT_LAT_MASK; |
tmp &= ~MC_DISP0R_INIT_LAT_MASK; |
if (mode1) |
tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); |
if (mode0) |
tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); |
WREG32_MC(MC_INIT_MISC_LAT_TIMER, tmp); |
} |
rs690_line_buffer_adjust(rdev, mode0, mode1); |
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) |
WREG32(DCP_CONTROL, 0); |
if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) |
WREG32(DCP_CONTROL, 2); |
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); |
rs690_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); |
tmp = (wm0.lb_request_fifo_depth - 1); |
tmp |= (wm1.lb_request_fifo_depth - 1) << 16; |
WREG32(LB_MAX_REQ_OUTSTANDING, tmp); |
if (mode0 && mode1) { |
/* Both heads share the fill rate; see formula in the header. */ |
if (rfixed_trunc(wm0.dbpp) > 64) |
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); |
else |
a.full = wm0.num_line_pair.full; |
if (rfixed_trunc(wm1.dbpp) > 64) |
b.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); |
else |
b.full = wm1.num_line_pair.full; |
a.full += b.full; |
fill_rate.full = rfixed_div(wm0.sclk, a); |
if (wm0.consumption_rate.full > fill_rate.full) { |
b.full = wm0.consumption_rate.full - fill_rate.full; |
b.full = rfixed_mul(b, wm0.active_time); |
a.full = rfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
a.full = a.full + b.full; |
b.full = rfixed_const(16 * 1000); |
priority_mark02.full = rfixed_div(a, b); |
} else { |
a.full = rfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
b.full = rfixed_const(16 * 1000); |
priority_mark02.full = rfixed_div(a, b); |
} |
if (wm1.consumption_rate.full > fill_rate.full) { |
b.full = wm1.consumption_rate.full - fill_rate.full; |
b.full = rfixed_mul(b, wm1.active_time); |
a.full = rfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
a.full = a.full + b.full; |
b.full = rfixed_const(16 * 1000); |
priority_mark12.full = rfixed_div(a, b); |
} else { |
a.full = rfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
b.full = rfixed_const(16 * 1000); |
priority_mark12.full = rfixed_div(a, b); |
} |
if (wm0.priority_mark.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark.full; |
if (rfixed_trunc(priority_mark02) < 0) |
priority_mark02.full = 0; |
if (wm0.priority_mark_max.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark_max.full; |
if (wm1.priority_mark.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark.full; |
if (rfixed_trunc(priority_mark12) < 0) |
priority_mark12.full = 0; |
if (wm1.priority_mark_max.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark_max.full; |
WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); |
WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); |
} else if (mode0) { |
/* Only head 0 active. */ |
if (rfixed_trunc(wm0.dbpp) > 64) |
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair); |
else |
a.full = wm0.num_line_pair.full; |
fill_rate.full = rfixed_div(wm0.sclk, a); |
if (wm0.consumption_rate.full > fill_rate.full) { |
b.full = wm0.consumption_rate.full - fill_rate.full; |
b.full = rfixed_mul(b, wm0.active_time); |
a.full = rfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
a.full = a.full + b.full; |
b.full = rfixed_const(16 * 1000); |
priority_mark02.full = rfixed_div(a, b); |
} else { |
a.full = rfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
b.full = rfixed_const(16 * 1000); |
priority_mark02.full = rfixed_div(a, b); |
} |
if (wm0.priority_mark.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark.full; |
if (rfixed_trunc(priority_mark02) < 0) |
priority_mark02.full = 0; |
if (wm0.priority_mark_max.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark_max.full; |
WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
} else { |
/* Only head 1 active. */ |
if (rfixed_trunc(wm1.dbpp) > 64) |
a.full = rfixed_mul(wm1.dbpp, wm1.num_line_pair); |
else |
a.full = wm1.num_line_pair.full; |
fill_rate.full = rfixed_div(wm1.sclk, a); |
if (wm1.consumption_rate.full > fill_rate.full) { |
b.full = wm1.consumption_rate.full - fill_rate.full; |
b.full = rfixed_mul(b, wm1.active_time); |
a.full = rfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
a.full = a.full + b.full; |
b.full = rfixed_const(16 * 1000); |
priority_mark12.full = rfixed_div(a, b); |
} else { |
a.full = rfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
b.full = rfixed_const(16 * 1000); |
priority_mark12.full = rfixed_div(a, b); |
} |
if (wm1.priority_mark.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark.full; |
if (rfixed_trunc(priority_mark12) < 0) |
priority_mark12.full = 0; |
if (wm1.priority_mark_max.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark_max.full; |
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); |
WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); |
} |
} |
/* |
* Indirect registers accessor |
*/ |
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg) |
/drivers/video/drm/radeon/rv515.c |
---|
25,12 → 25,11 |
* Alex Deucher |
* Jerome Glisse |
*/ |
#include <linux/seq_file.h> |
//#include <linux/seq_file.h> |
#include "drmP.h" |
#include "rv515d.h" |
#include "radeon_reg.h" |
#include "radeon.h" |
#include "rv515_reg_safe.h" |
/* rv515 depends on : */ |
void r100_hdp_reset(struct radeon_device *rdev); |
int r100_cp_reset(struct radeon_device *rdev); |
37,6 → 36,8 |
int r100_rb2d_reset(struct radeon_device *rdev); |
int r100_gui_wait_for_idle(struct radeon_device *rdev); |
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); |
int rv370_pcie_gart_enable(struct radeon_device *rdev); |
void rv370_pcie_gart_disable(struct radeon_device *rdev); |
void r420_pipes_init(struct radeon_device *rdev); |
void rs600_mc_disable_clients(struct radeon_device *rdev); |
void rs600_disable_vga(struct radeon_device *rdev); |
51,7 → 52,6 |
void rv515_gpu_init(struct radeon_device *rdev); |
int rv515_mc_wait_for_idle(struct radeon_device *rdev); |
/* |
* MC |
*/ |
76,16 → 76,16 |
/* Setup GPU memory space */ |
rdev->mc.vram_location = 0xFFFFFFFFUL; |
rdev->mc.gtt_location = 0xFFFFFFFFUL; |
// if (rdev->flags & RADEON_IS_AGP) { |
// r = radeon_agp_init(rdev); |
// if (r) { |
// printk(KERN_WARNING "[drm] Disabling AGP\n"); |
// rdev->flags &= ~RADEON_IS_AGP; |
// rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
// } else { |
// rdev->mc.gtt_location = rdev->mc.agp_base; |
// } |
// } |
if (rdev->flags & RADEON_IS_AGP) { |
r = radeon_agp_init(rdev); |
if (r) { |
printk(KERN_WARNING "[drm] Disabling AGP\n"); |
rdev->flags &= ~RADEON_IS_AGP; |
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; |
} else { |
rdev->mc.gtt_location = rdev->mc.agp_base; |
} |
} |
r = radeon_mc_setup(rdev); |
if (r) { |
return r; |
98,26 → 98,26 |
"programming pipes. Bad things might happen.\n"); |
} |
/* Write VRAM size in case we are limiting it */ |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); |
tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.vram_size); |
tmp = REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32(0x134, tmp); |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
tmp = REG_SET(MC_FB_TOP, tmp >> 16); |
tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32_MC(MC_FB_LOCATION, tmp); |
WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16); |
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; |
tmp = REG_SET(RV515_MC_FB_TOP, tmp >> 16); |
tmp |= REG_SET(RV515_MC_FB_START, rdev->mc.vram_location >> 16); |
WREG32_MC(RV515_MC_FB_LOCATION, tmp); |
WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16); |
WREG32(0x310, rdev->mc.vram_location); |
if (rdev->flags & RADEON_IS_AGP) { |
tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1; |
tmp = REG_SET(MC_AGP_TOP, tmp >> 16); |
tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16); |
WREG32_MC(MC_AGP_LOCATION, tmp); |
WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base); |
WREG32_MC(MC_AGP_BASE_2, 0); |
tmp = REG_SET(RV515_MC_AGP_TOP, tmp >> 16); |
tmp |= REG_SET(RV515_MC_AGP_START, rdev->mc.gtt_location >> 16); |
WREG32_MC(RV515_MC_AGP_LOCATION, tmp); |
WREG32_MC(RV515_MC_AGP_BASE, rdev->mc.agp_base); |
WREG32_MC(RV515_MC_AGP_BASE_2, 0); |
} else { |
WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF); |
WREG32_MC(MC_AGP_BASE, 0); |
WREG32_MC(MC_AGP_BASE_2, 0); |
WREG32_MC(RV515_MC_AGP_LOCATION, 0x0FFFFFFF); |
WREG32_MC(RV515_MC_AGP_BASE, 0); |
WREG32_MC(RV515_MC_AGP_BASE_2, 0); |
} |
return 0; |
} |
124,6 → 124,9 |
/* Tear down RV515 MC/GART state. RV515 uses the RV370 PCIE GART with a
 * VRAM-resident table, so disable the GART first, then free the VRAM
 * table and the common GART structures. */
void rv515_mc_fini(struct radeon_device *rdev) |
{ |
rv370_pcie_gart_disable(rdev); |
radeon_gart_table_vram_free(rdev); |
radeon_gart_fini(rdev); |
} |
132,74 → 135,101 |
*/ |
void rv515_ring_start(struct radeon_device *rdev) |
{ |
unsigned gb_tile_config; |
int r; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
/* Sub pixel 1/12 so we can have 4K rendering according to doc */ |
gb_tile_config = R300_ENABLE_TILING | R300_TILE_SIZE_16; |
switch (rdev->num_gb_pipes) { |
case 2: |
gb_tile_config |= R300_PIPE_COUNT_R300; |
break; |
case 3: |
gb_tile_config |= R300_PIPE_COUNT_R420_3P; |
break; |
case 4: |
gb_tile_config |= R300_PIPE_COUNT_R420; |
break; |
case 1: |
default: |
gb_tile_config |= R300_PIPE_COUNT_RV350; |
break; |
} |
r = radeon_ring_lock(rdev, 64); |
if (r) { |
return; |
} |
radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0)); |
radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0)); |
radeon_ring_write(rdev, |
ISYNC_ANY2D_IDLE3D | |
ISYNC_ANY3D_IDLE2D | |
ISYNC_WAIT_IDLEGUI | |
ISYNC_CPSCRATCH_IDLEGUI); |
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); |
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
RADEON_ISYNC_ANY2D_IDLE3D | |
RADEON_ISYNC_ANY3D_IDLE2D | |
RADEON_ISYNC_WAIT_IDLEGUI | |
RADEON_ISYNC_CPSCRATCH_IDLEGUI); |
radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0)); |
radeon_ring_write(rdev, gb_tile_config); |
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); |
radeon_ring_write(rdev, |
RADEON_WAIT_2D_IDLECLEAN | |
RADEON_WAIT_3D_IDLECLEAN); |
radeon_ring_write(rdev, PACKET0(0x170C, 0)); |
radeon_ring_write(rdev, 1 << 31); |
radeon_ring_write(rdev, PACKET0(GB_SELECT, 0)); |
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0)); |
radeon_ring_write(rdev, 0); |
radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0)); |
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0)); |
radeon_ring_write(rdev, 0); |
radeon_ring_write(rdev, PACKET0(0x42C8, 0)); |
radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1); |
radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0)); |
radeon_ring_write(rdev, PACKET0(R500_VAP_INDEX_OFFSET, 0)); |
radeon_ring_write(rdev, 0); |
radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); |
radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); |
radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0)); |
radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN); |
radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0)); |
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); |
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); |
radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0)); |
radeon_ring_write(rdev, |
RADEON_WAIT_2D_IDLECLEAN | |
RADEON_WAIT_3D_IDLECLEAN); |
radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0)); |
radeon_ring_write(rdev, 0); |
radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0)); |
radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE); |
radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0)); |
radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE); |
radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0)); |
radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); |
radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE); |
radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0)); |
radeon_ring_write(rdev, |
((6 << MS_X0_SHIFT) | |
(6 << MS_Y0_SHIFT) | |
(6 << MS_X1_SHIFT) | |
(6 << MS_Y1_SHIFT) | |
(6 << MS_X2_SHIFT) | |
(6 << MS_Y2_SHIFT) | |
(6 << MSBD0_Y_SHIFT) | |
(6 << MSBD0_X_SHIFT))); |
radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0)); |
((6 << R300_MS_X0_SHIFT) | |
(6 << R300_MS_Y0_SHIFT) | |
(6 << R300_MS_X1_SHIFT) | |
(6 << R300_MS_Y1_SHIFT) | |
(6 << R300_MS_X2_SHIFT) | |
(6 << R300_MS_Y2_SHIFT) | |
(6 << R300_MSBD0_Y_SHIFT) | |
(6 << R300_MSBD0_X_SHIFT))); |
radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0)); |
radeon_ring_write(rdev, |
((6 << MS_X3_SHIFT) | |
(6 << MS_Y3_SHIFT) | |
(6 << MS_X4_SHIFT) | |
(6 << MS_Y4_SHIFT) | |
(6 << MS_X5_SHIFT) | |
(6 << MS_Y5_SHIFT) | |
(6 << MSBD1_SHIFT))); |
radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0)); |
radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL); |
radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0)); |
radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE); |
radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0)); |
radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST); |
((6 << R300_MS_X3_SHIFT) | |
(6 << R300_MS_Y3_SHIFT) | |
(6 << R300_MS_X4_SHIFT) | |
(6 << R300_MS_Y4_SHIFT) | |
(6 << R300_MS_X5_SHIFT) | |
(6 << R300_MS_Y5_SHIFT) | |
(6 << R300_MSBD1_SHIFT))); |
radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0)); |
radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); |
radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0)); |
radeon_ring_write(rdev, |
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); |
radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0)); |
radeon_ring_write(rdev, |
R300_GEOMETRY_ROUND_NEAREST | |
R300_COLOR_ROUND_NEAREST); |
radeon_ring_write(rdev, PACKET0(0x20C8, 0)); |
radeon_ring_write(rdev, 0); |
radeon_ring_unlock_commit(rdev); |
LEAVE(); |
dbgprintf("done %s\n",__FUNCTION__); |
} |
215,8 → 245,8 |
for (i = 0; i < rdev->usec_timeout; i++) { |
/* read MC_STATUS */ |
tmp = RREG32_MC(MC_STATUS); |
if (tmp & MC_STATUS_IDLE) { |
tmp = RREG32_MC(RV515_MC_STATUS); |
if (tmp & RV515_MC_STATUS_IDLE) { |
return 0; |
} |
DRM_UDELAY(1); |
255,6 → 285,7 |
} |
} |
int rv515_ga_reset(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
261,25 → 292,25 |
bool reinit_cp; |
int i; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
reinit_cp = rdev->cp.ready; |
rdev->cp.ready = false; |
for (i = 0; i < rdev->usec_timeout; i++) { |
WREG32(CP_CSQ_MODE, 0); |
WREG32(CP_CSQ_CNTL, 0); |
WREG32(RBBM_SOFT_RESET, 0x32005); |
(void)RREG32(RBBM_SOFT_RESET); |
WREG32(RADEON_CP_CSQ_MODE, 0); |
WREG32(RADEON_CP_CSQ_CNTL, 0); |
WREG32(RADEON_RBBM_SOFT_RESET, 0x32005); |
(void)RREG32(RADEON_RBBM_SOFT_RESET); |
udelay(200); |
WREG32(RBBM_SOFT_RESET, 0); |
WREG32(RADEON_RBBM_SOFT_RESET, 0); |
/* Wait to prevent race in RBBM_STATUS */ |
mdelay(1); |
tmp = RREG32(RBBM_STATUS); |
tmp = RREG32(RADEON_RBBM_STATUS); |
if (tmp & ((1 << 20) | (1 << 26))) { |
DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)\n", tmp); |
/* GA still busy soft reset it */ |
WREG32(0x429C, 0x200); |
WREG32(VAP_PVS_STATE_FLUSH_REG, 0); |
WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0); |
WREG32(0x43E0, 0); |
WREG32(0x43E4, 0); |
WREG32(0x24AC, 0); |
286,13 → 317,13 |
} |
/* Wait to prevent race in RBBM_STATUS */ |
mdelay(1); |
tmp = RREG32(RBBM_STATUS); |
tmp = RREG32(RADEON_RBBM_STATUS); |
if (!(tmp & ((1 << 20) | (1 << 26)))) { |
break; |
} |
} |
for (i = 0; i < rdev->usec_timeout; i++) { |
tmp = RREG32(RBBM_STATUS); |
tmp = RREG32(RADEON_RBBM_STATUS); |
if (!(tmp & ((1 << 20) | (1 << 26)))) { |
DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n", |
tmp); |
306,7 → 337,7 |
} |
DRM_UDELAY(1); |
} |
tmp = RREG32(RBBM_STATUS); |
tmp = RREG32(RADEON_RBBM_STATUS); |
DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp); |
return -1; |
} |
315,10 → 346,10 |
{ |
uint32_t status; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
/* reset order likely matter */ |
status = RREG32(RBBM_STATUS); |
status = RREG32(RADEON_RBBM_STATUS); |
/* reset HDP */ |
r100_hdp_reset(rdev); |
/* reset rb2d */ |
330,12 → 361,12 |
rv515_ga_reset(rdev); |
} |
/* reset CP */ |
status = RREG32(RBBM_STATUS); |
status = RREG32(RADEON_RBBM_STATUS); |
if (status & (1 << 16)) { |
r100_cp_reset(rdev); |
} |
/* Check if GPU is idle */ |
status = RREG32(RBBM_STATUS); |
status = RREG32(RADEON_RBBM_STATUS); |
if (status & (1 << 31)) { |
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status); |
return -1; |
354,7 → 385,8 |
rdev->mc.vram_width = 128; |
rdev->mc.vram_is_ddr = true; |
tmp = RREG32_MC(RV515_MC_CNTL) & MEM_NUM_CHANNELS_MASK; |
tmp = RREG32_MC(RV515_MC_CNTL); |
tmp &= RV515_MEM_NUM_CHANNELS_MASK; |
switch (tmp) { |
case 0: |
rdev->mc.vram_width = 64; |
370,17 → 402,11 |
void rv515_vram_info(struct radeon_device *rdev) |
{ |
fixed20_12 a; |
rv515_vram_get_type(rdev); |
rdev->mc.vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
r100_vram_init_sizes(rdev); |
/* FIXME: we should enforce default clock in case GPU is not in |
* default setup |
*/ |
a.full = rfixed_const(100); |
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk); |
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0); |
} |
391,19 → 417,38 |
{ |
uint32_t r; |
WREG32(MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); |
r = RREG32(MC_IND_DATA); |
WREG32(MC_IND_INDEX, 0); |
WREG32(R520_MC_IND_INDEX, 0x7f0000 | (reg & 0xffff)); |
r = RREG32(R520_MC_IND_DATA); |
WREG32(R520_MC_IND_INDEX, 0); |
return r; |
} |
void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
{ |
WREG32(MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); |
WREG32(MC_IND_DATA, (v)); |
WREG32(MC_IND_INDEX, 0); |
WREG32(R520_MC_IND_INDEX, 0xff0000 | ((reg) & 0xffff)); |
WREG32(R520_MC_IND_DATA, (v)); |
WREG32(R520_MC_IND_INDEX, 0); |
} |
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
{ |
uint32_t r; |
WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); |
(void)RREG32(RADEON_PCIE_INDEX); |
r = RREG32(RADEON_PCIE_DATA); |
return r; |
} |
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
{ |
WREG32(RADEON_PCIE_INDEX, ((reg) & 0x7ff)); |
(void)RREG32(RADEON_PCIE_INDEX); |
WREG32(RADEON_PCIE_DATA, (v)); |
(void)RREG32(RADEON_PCIE_DATA); |
} |
/* |
* Debugfs info |
*/ |
415,13 → 460,13 |
struct radeon_device *rdev = dev->dev_private; |
uint32_t tmp; |
tmp = RREG32(GB_PIPE_SELECT); |
tmp = RREG32(R400_GB_PIPE_SELECT); |
seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp); |
tmp = RREG32(SU_REG_DEST); |
tmp = RREG32(R500_SU_REG_DEST); |
seq_printf(m, "SU_REG_DEST 0x%08x\n", tmp); |
tmp = RREG32(GB_TILE_CONFIG); |
tmp = RREG32(R300_GB_TILE_CONFIG); |
seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp); |
tmp = RREG32(DST_PIPE_CONFIG); |
tmp = RREG32(R300_DST_PIPE_CONFIG); |
seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp); |
return 0; |
} |
468,564 → 513,60 |
#endif |
} |
/* |
* Asic initialization |
*/ |
int rv515_init(struct radeon_device *rdev) |
{ |
ENTER(); |
rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm; |
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm); |
return 0; |
} |
void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *crtc) |
{ |
int index_reg = 0x6578 + crtc->crtc_offset; |
int data_reg = 0x657c + crtc->crtc_offset; |
WREG32(0x659C + crtc->crtc_offset, 0x0); |
WREG32(0x6594 + crtc->crtc_offset, 0x705); |
WREG32(0x65A4 + crtc->crtc_offset, 0x10001); |
WREG32(0x65D8 + crtc->crtc_offset, 0x0); |
WREG32(0x65B0 + crtc->crtc_offset, 0x0); |
WREG32(0x65C0 + crtc->crtc_offset, 0x0); |
WREG32(0x65D4 + crtc->crtc_offset, 0x0); |
WREG32(index_reg, 0x0); |
WREG32(data_reg, 0x841880A8); |
WREG32(index_reg, 0x1); |
WREG32(data_reg, 0x84208680); |
WREG32(index_reg, 0x2); |
WREG32(data_reg, 0xBFF880B0); |
WREG32(index_reg, 0x100); |
WREG32(data_reg, 0x83D88088); |
WREG32(index_reg, 0x101); |
WREG32(data_reg, 0x84608680); |
WREG32(index_reg, 0x102); |
WREG32(data_reg, 0xBFF080D0); |
WREG32(index_reg, 0x200); |
WREG32(data_reg, 0x83988068); |
WREG32(index_reg, 0x201); |
WREG32(data_reg, 0x84A08680); |
WREG32(index_reg, 0x202); |
WREG32(data_reg, 0xBFF080F8); |
WREG32(index_reg, 0x300); |
WREG32(data_reg, 0x83588058); |
WREG32(index_reg, 0x301); |
WREG32(data_reg, 0x84E08660); |
WREG32(index_reg, 0x302); |
WREG32(data_reg, 0xBFF88120); |
WREG32(index_reg, 0x400); |
WREG32(data_reg, 0x83188040); |
WREG32(index_reg, 0x401); |
WREG32(data_reg, 0x85008660); |
WREG32(index_reg, 0x402); |
WREG32(data_reg, 0xBFF88150); |
WREG32(index_reg, 0x500); |
WREG32(data_reg, 0x82D88030); |
WREG32(index_reg, 0x501); |
WREG32(data_reg, 0x85408640); |
WREG32(index_reg, 0x502); |
WREG32(data_reg, 0xBFF88180); |
WREG32(index_reg, 0x600); |
WREG32(data_reg, 0x82A08018); |
WREG32(index_reg, 0x601); |
WREG32(data_reg, 0x85808620); |
WREG32(index_reg, 0x602); |
WREG32(data_reg, 0xBFF081B8); |
WREG32(index_reg, 0x700); |
WREG32(data_reg, 0x82608010); |
WREG32(index_reg, 0x701); |
WREG32(data_reg, 0x85A08600); |
WREG32(index_reg, 0x702); |
WREG32(data_reg, 0x800081F0); |
WREG32(index_reg, 0x800); |
WREG32(data_reg, 0x8228BFF8); |
WREG32(index_reg, 0x801); |
WREG32(data_reg, 0x85E085E0); |
WREG32(index_reg, 0x802); |
WREG32(data_reg, 0xBFF88228); |
WREG32(index_reg, 0x10000); |
WREG32(data_reg, 0x82A8BF00); |
WREG32(index_reg, 0x10001); |
WREG32(data_reg, 0x82A08CC0); |
WREG32(index_reg, 0x10002); |
WREG32(data_reg, 0x8008BEF8); |
WREG32(index_reg, 0x10100); |
WREG32(data_reg, 0x81F0BF28); |
WREG32(index_reg, 0x10101); |
WREG32(data_reg, 0x83608CA0); |
WREG32(index_reg, 0x10102); |
WREG32(data_reg, 0x8018BED0); |
WREG32(index_reg, 0x10200); |
WREG32(data_reg, 0x8148BF38); |
WREG32(index_reg, 0x10201); |
WREG32(data_reg, 0x84408C80); |
WREG32(index_reg, 0x10202); |
WREG32(data_reg, 0x8008BEB8); |
WREG32(index_reg, 0x10300); |
WREG32(data_reg, 0x80B0BF78); |
WREG32(index_reg, 0x10301); |
WREG32(data_reg, 0x85008C20); |
WREG32(index_reg, 0x10302); |
WREG32(data_reg, 0x8020BEA0); |
WREG32(index_reg, 0x10400); |
WREG32(data_reg, 0x8028BF90); |
WREG32(index_reg, 0x10401); |
WREG32(data_reg, 0x85E08BC0); |
WREG32(index_reg, 0x10402); |
WREG32(data_reg, 0x8018BE90); |
WREG32(index_reg, 0x10500); |
WREG32(data_reg, 0xBFB8BFB0); |
WREG32(index_reg, 0x10501); |
WREG32(data_reg, 0x86C08B40); |
WREG32(index_reg, 0x10502); |
WREG32(data_reg, 0x8010BE90); |
WREG32(index_reg, 0x10600); |
WREG32(data_reg, 0xBF58BFC8); |
WREG32(index_reg, 0x10601); |
WREG32(data_reg, 0x87A08AA0); |
WREG32(index_reg, 0x10602); |
WREG32(data_reg, 0x8010BE98); |
WREG32(index_reg, 0x10700); |
WREG32(data_reg, 0xBF10BFF0); |
WREG32(index_reg, 0x10701); |
WREG32(data_reg, 0x886089E0); |
WREG32(index_reg, 0x10702); |
WREG32(data_reg, 0x8018BEB0); |
WREG32(index_reg, 0x10800); |
WREG32(data_reg, 0xBED8BFE8); |
WREG32(index_reg, 0x10801); |
WREG32(data_reg, 0x89408940); |
WREG32(index_reg, 0x10802); |
WREG32(data_reg, 0xBFE8BED8); |
WREG32(index_reg, 0x20000); |
WREG32(data_reg, 0x80008000); |
WREG32(index_reg, 0x20001); |
WREG32(data_reg, 0x90008000); |
WREG32(index_reg, 0x20002); |
WREG32(data_reg, 0x80008000); |
WREG32(index_reg, 0x20003); |
WREG32(data_reg, 0x80008000); |
WREG32(index_reg, 0x20100); |
WREG32(data_reg, 0x80108000); |
WREG32(index_reg, 0x20101); |
WREG32(data_reg, 0x8FE0BF70); |
WREG32(index_reg, 0x20102); |
WREG32(data_reg, 0xBFE880C0); |
WREG32(index_reg, 0x20103); |
WREG32(data_reg, 0x80008000); |
WREG32(index_reg, 0x20200); |
WREG32(data_reg, 0x8018BFF8); |
WREG32(index_reg, 0x20201); |
WREG32(data_reg, 0x8F80BF08); |
WREG32(index_reg, 0x20202); |
WREG32(data_reg, 0xBFD081A0); |
WREG32(index_reg, 0x20203); |
WREG32(data_reg, 0xBFF88000); |
WREG32(index_reg, 0x20300); |
WREG32(data_reg, 0x80188000); |
WREG32(index_reg, 0x20301); |
WREG32(data_reg, 0x8EE0BEC0); |
WREG32(index_reg, 0x20302); |
WREG32(data_reg, 0xBFB082A0); |
WREG32(index_reg, 0x20303); |
WREG32(data_reg, 0x80008000); |
WREG32(index_reg, 0x20400); |
WREG32(data_reg, 0x80188000); |
WREG32(index_reg, 0x20401); |
WREG32(data_reg, 0x8E00BEA0); |
WREG32(index_reg, 0x20402); |
WREG32(data_reg, 0xBF8883C0); |
WREG32(index_reg, 0x20403); |
WREG32(data_reg, 0x80008000); |
WREG32(index_reg, 0x20500); |
WREG32(data_reg, 0x80188000); |
WREG32(index_reg, 0x20501); |
WREG32(data_reg, 0x8D00BE90); |
WREG32(index_reg, 0x20502); |
WREG32(data_reg, 0xBF588500); |
WREG32(index_reg, 0x20503); |
WREG32(data_reg, 0x80008008); |
WREG32(index_reg, 0x20600); |
WREG32(data_reg, 0x80188000); |
WREG32(index_reg, 0x20601); |
WREG32(data_reg, 0x8BC0BE98); |
WREG32(index_reg, 0x20602); |
WREG32(data_reg, 0xBF308660); |
WREG32(index_reg, 0x20603); |
WREG32(data_reg, 0x80008008); |
WREG32(index_reg, 0x20700); |
WREG32(data_reg, 0x80108000); |
WREG32(index_reg, 0x20701); |
WREG32(data_reg, 0x8A80BEB0); |
WREG32(index_reg, 0x20702); |
WREG32(data_reg, 0xBF0087C0); |
WREG32(index_reg, 0x20703); |
WREG32(data_reg, 0x80008008); |
WREG32(index_reg, 0x20800); |
WREG32(data_reg, 0x80108000); |
WREG32(index_reg, 0x20801); |
WREG32(data_reg, 0x8920BED0); |
WREG32(index_reg, 0x20802); |
WREG32(data_reg, 0xBED08920); |
WREG32(index_reg, 0x20803); |
WREG32(data_reg, 0x80008010); |
WREG32(index_reg, 0x30000); |
WREG32(data_reg, 0x90008000); |
WREG32(index_reg, 0x30001); |
WREG32(data_reg, 0x80008000); |
WREG32(index_reg, 0x30100); |
WREG32(data_reg, 0x8FE0BF90); |
WREG32(index_reg, 0x30101); |
WREG32(data_reg, 0xBFF880A0); |
WREG32(index_reg, 0x30200); |
WREG32(data_reg, 0x8F60BF40); |
WREG32(index_reg, 0x30201); |
WREG32(data_reg, 0xBFE88180); |
WREG32(index_reg, 0x30300); |
WREG32(data_reg, 0x8EC0BF00); |
WREG32(index_reg, 0x30301); |
WREG32(data_reg, 0xBFC88280); |
WREG32(index_reg, 0x30400); |
WREG32(data_reg, 0x8DE0BEE0); |
WREG32(index_reg, 0x30401); |
WREG32(data_reg, 0xBFA083A0); |
WREG32(index_reg, 0x30500); |
WREG32(data_reg, 0x8CE0BED0); |
WREG32(index_reg, 0x30501); |
WREG32(data_reg, 0xBF7884E0); |
WREG32(index_reg, 0x30600); |
WREG32(data_reg, 0x8BA0BED8); |
WREG32(index_reg, 0x30601); |
WREG32(data_reg, 0xBF508640); |
WREG32(index_reg, 0x30700); |
WREG32(data_reg, 0x8A60BEE8); |
WREG32(index_reg, 0x30701); |
WREG32(data_reg, 0xBF2087A0); |
WREG32(index_reg, 0x30800); |
WREG32(data_reg, 0x8900BF00); |
WREG32(index_reg, 0x30801); |
WREG32(data_reg, 0xBF008900); |
} |
struct rv515_watermark { |
u32 lb_request_fifo_depth; |
fixed20_12 num_line_pair; |
fixed20_12 estimated_width; |
fixed20_12 worst_case_latency; |
fixed20_12 consumption_rate; |
fixed20_12 active_time; |
fixed20_12 dbpp; |
fixed20_12 priority_mark_max; |
fixed20_12 priority_mark; |
fixed20_12 sclk; |
static const unsigned r500_reg_safe_bm[159] = { |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFBF, 0xFFFFFFFF, 0xFFFFFFBF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF, |
0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000, |
0xF0000038, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0x1FFFFC78, 0xFFFFE000, 0xFFFFFFFE, 0xFFFFFFFF, |
0x38CF8F50, 0xFFF88082, 0xFF0000FC, 0xFAE009FF, |
0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, |
0xFFFF8CFC, 0xFFFFC1FF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, |
0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFF80FFFF, |
0x00000000, 0x00000000, 0x00000000, 0x00000000, |
0x0003FC01, 0x3FFFFCF8, 0xFE800B19, |
}; |
void rv515_crtc_bandwidth_compute(struct radeon_device *rdev, |
struct radeon_crtc *crtc, |
struct rv515_watermark *wm) |
{ |
struct drm_display_mode *mode = &crtc->base.mode; |
fixed20_12 a, b, c; |
fixed20_12 pclk, request_fifo_depth, tolerable_latency, estimated_width; |
fixed20_12 consumption_time, line_time, chunk_time, read_delay_latency; |
if (!crtc->base.enabled) { |
/* FIXME: wouldn't it better to set priority mark to maximum */ |
wm->lb_request_fifo_depth = 4; |
return; |
} |
if (crtc->vsc.full > rfixed_const(2)) |
wm->num_line_pair.full = rfixed_const(2); |
else |
wm->num_line_pair.full = rfixed_const(1); |
b.full = rfixed_const(mode->crtc_hdisplay); |
c.full = rfixed_const(256); |
a.full = rfixed_mul(wm->num_line_pair, b); |
request_fifo_depth.full = rfixed_div(a, c); |
if (a.full < rfixed_const(4)) { |
wm->lb_request_fifo_depth = 4; |
} else { |
wm->lb_request_fifo_depth = rfixed_trunc(request_fifo_depth); |
} |
/* Determine consumption rate |
* pclk = pixel clock period(ns) = 1000 / (mode.clock / 1000) |
* vtaps = number of vertical taps, |
* vsc = vertical scaling ratio, defined as source/destination |
* hsc = horizontal scaling ration, defined as source/destination |
*/ |
a.full = rfixed_const(mode->clock); |
b.full = rfixed_const(1000); |
a.full = rfixed_div(a, b); |
pclk.full = rfixed_div(b, a); |
if (crtc->rmx_type != RMX_OFF) { |
b.full = rfixed_const(2); |
if (crtc->vsc.full > b.full) |
b.full = crtc->vsc.full; |
b.full = rfixed_mul(b, crtc->hsc); |
c.full = rfixed_const(2); |
b.full = rfixed_div(b, c); |
consumption_time.full = rfixed_div(pclk, b); |
} else { |
consumption_time.full = pclk.full; |
} |
a.full = rfixed_const(1); |
wm->consumption_rate.full = rfixed_div(a, consumption_time); |
/* Determine line time |
* LineTime = total time for one line of displayhtotal |
* LineTime = total number of horizontal pixels |
* pclk = pixel clock period(ns) |
*/ |
a.full = rfixed_const(crtc->base.mode.crtc_htotal); |
line_time.full = rfixed_mul(a, pclk); |
/* Determine active time |
* ActiveTime = time of active region of display within one line, |
* hactive = total number of horizontal active pixels |
* htotal = total number of horizontal pixels |
*/ |
a.full = rfixed_const(crtc->base.mode.crtc_htotal); |
b.full = rfixed_const(crtc->base.mode.crtc_hdisplay); |
wm->active_time.full = rfixed_mul(line_time, b); |
wm->active_time.full = rfixed_div(wm->active_time, a); |
/* Determine chunk time |
* ChunkTime = the time it takes the DCP to send one chunk of data |
* to the LB which consists of pipeline delay and inter chunk gap |
* sclk = system clock(Mhz) |
*/ |
a.full = rfixed_const(600 * 1000); |
chunk_time.full = rfixed_div(a, rdev->pm.sclk); |
read_delay_latency.full = rfixed_const(1000); |
/* Determine the worst case latency |
* NumLinePair = Number of line pairs to request(1=2 lines, 2=4 lines) |
* WorstCaseLatency = worst case time from urgent to when the MC starts |
* to return data |
* READ_DELAY_IDLE_MAX = constant of 1us |
* ChunkTime = time it takes the DCP to send one chunk of data to the LB |
* which consists of pipeline delay and inter chunk gap |
*/ |
if (rfixed_trunc(wm->num_line_pair) > 1) { |
a.full = rfixed_const(3); |
wm->worst_case_latency.full = rfixed_mul(a, chunk_time); |
wm->worst_case_latency.full += read_delay_latency.full; |
} else { |
wm->worst_case_latency.full = chunk_time.full + read_delay_latency.full; |
} |
/* Determine the tolerable latency |
* TolerableLatency = Any given request has only 1 line time |
* for the data to be returned |
* LBRequestFifoDepth = Number of chunk requests the LB can |
* put into the request FIFO for a display |
* LineTime = total time for one line of display |
* ChunkTime = the time it takes the DCP to send one chunk |
* of data to the LB which consists of |
* pipeline delay and inter chunk gap |
*/ |
if ((2+wm->lb_request_fifo_depth) >= rfixed_trunc(request_fifo_depth)) { |
tolerable_latency.full = line_time.full; |
} else { |
tolerable_latency.full = rfixed_const(wm->lb_request_fifo_depth - 2); |
tolerable_latency.full = request_fifo_depth.full - tolerable_latency.full; |
tolerable_latency.full = rfixed_mul(tolerable_latency, chunk_time); |
tolerable_latency.full = line_time.full - tolerable_latency.full; |
} |
/* We assume worst case 32bits (4 bytes) */ |
wm->dbpp.full = rfixed_const(2 * 16); |
/* Determine the maximum priority mark |
* width = viewport width in pixels |
*/ |
a.full = rfixed_const(16); |
wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay); |
wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a); |
/* Determine estimated width */ |
estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full; |
estimated_width.full = rfixed_div(estimated_width, consumption_time); |
if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) { |
wm->priority_mark.full = rfixed_const(10); |
} else { |
a.full = rfixed_const(16); |
wm->priority_mark.full = rfixed_div(estimated_width, a); |
wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full; |
} |
} |
void rv515_bandwidth_avivo_update(struct radeon_device *rdev) |
int rv515_init(struct radeon_device *rdev) |
{ |
struct drm_display_mode *mode0 = NULL; |
struct drm_display_mode *mode1 = NULL; |
struct rv515_watermark wm0; |
struct rv515_watermark wm1; |
u32 tmp; |
fixed20_12 priority_mark02, priority_mark12, fill_rate; |
fixed20_12 a, b; |
dbgprintf("%s\n",__FUNCTION__); |
if (rdev->mode_info.crtcs[0]->base.enabled) |
mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
if (rdev->mode_info.crtcs[1]->base.enabled) |
mode1 = &rdev->mode_info.crtcs[1]->base.mode; |
rs690_line_buffer_adjust(rdev, mode0, mode1); |
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[0], &wm0); |
rv515_crtc_bandwidth_compute(rdev, rdev->mode_info.crtcs[1], &wm1); |
tmp = wm0.lb_request_fifo_depth; |
tmp |= wm1.lb_request_fifo_depth << 16; |
WREG32(LB_MAX_REQ_OUTSTANDING, tmp); |
if (mode0 && mode1) { |
if (rfixed_trunc(wm0.dbpp) > 64) |
a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); |
else |
a.full = wm0.num_line_pair.full; |
if (rfixed_trunc(wm1.dbpp) > 64) |
b.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); |
else |
b.full = wm1.num_line_pair.full; |
a.full += b.full; |
fill_rate.full = rfixed_div(wm0.sclk, a); |
if (wm0.consumption_rate.full > fill_rate.full) { |
b.full = wm0.consumption_rate.full - fill_rate.full; |
b.full = rfixed_mul(b, wm0.active_time); |
a.full = rfixed_const(16); |
b.full = rfixed_div(b, a); |
a.full = rfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
priority_mark02.full = a.full + b.full; |
} else { |
a.full = rfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
b.full = rfixed_const(16 * 1000); |
priority_mark02.full = rfixed_div(a, b); |
rdev->config.r300.reg_safe_bm = r500_reg_safe_bm; |
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r500_reg_safe_bm); |
return 0; |
} |
if (wm1.consumption_rate.full > fill_rate.full) { |
b.full = wm1.consumption_rate.full - fill_rate.full; |
b.full = rfixed_mul(b, wm1.active_time); |
a.full = rfixed_const(16); |
b.full = rfixed_div(b, a); |
a.full = rfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
priority_mark12.full = a.full + b.full; |
} else { |
a.full = rfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
b.full = rfixed_const(16 * 1000); |
priority_mark12.full = rfixed_div(a, b); |
} |
if (wm0.priority_mark.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark.full; |
if (rfixed_trunc(priority_mark02) < 0) |
priority_mark02.full = 0; |
if (wm0.priority_mark_max.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark_max.full; |
if (wm1.priority_mark.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark.full; |
if (rfixed_trunc(priority_mark12) < 0) |
priority_mark12.full = 0; |
if (wm1.priority_mark_max.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark_max.full; |
WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); |
WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); |
} else if (mode0) { |
if (rfixed_trunc(wm0.dbpp) > 64) |
a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair); |
else |
a.full = wm0.num_line_pair.full; |
fill_rate.full = rfixed_div(wm0.sclk, a); |
if (wm0.consumption_rate.full > fill_rate.full) { |
b.full = wm0.consumption_rate.full - fill_rate.full; |
b.full = rfixed_mul(b, wm0.active_time); |
a.full = rfixed_const(16); |
b.full = rfixed_div(b, a); |
a.full = rfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
priority_mark02.full = a.full + b.full; |
} else { |
a.full = rfixed_mul(wm0.worst_case_latency, |
wm0.consumption_rate); |
b.full = rfixed_const(16); |
priority_mark02.full = rfixed_div(a, b); |
} |
if (wm0.priority_mark.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark.full; |
if (rfixed_trunc(priority_mark02) < 0) |
priority_mark02.full = 0; |
if (wm0.priority_mark_max.full > priority_mark02.full) |
priority_mark02.full = wm0.priority_mark_max.full; |
WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02)); |
WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02)); |
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
} else { |
if (rfixed_trunc(wm1.dbpp) > 64) |
a.full = rfixed_div(wm1.dbpp, wm1.num_line_pair); |
else |
a.full = wm1.num_line_pair.full; |
fill_rate.full = rfixed_div(wm1.sclk, a); |
if (wm1.consumption_rate.full > fill_rate.full) { |
b.full = wm1.consumption_rate.full - fill_rate.full; |
b.full = rfixed_mul(b, wm1.active_time); |
a.full = rfixed_const(16); |
b.full = rfixed_div(b, a); |
a.full = rfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
priority_mark12.full = a.full + b.full; |
} else { |
a.full = rfixed_mul(wm1.worst_case_latency, |
wm1.consumption_rate); |
b.full = rfixed_const(16 * 1000); |
priority_mark12.full = rfixed_div(a, b); |
} |
if (wm1.priority_mark.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark.full; |
if (rfixed_trunc(priority_mark12) < 0) |
priority_mark12.full = 0; |
if (wm1.priority_mark_max.full > priority_mark12.full) |
priority_mark12.full = wm1.priority_mark_max.full; |
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF); |
WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF); |
WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12)); |
WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12)); |
} |
} |
void rv515_bandwidth_update(struct radeon_device *rdev) |
{ |
uint32_t tmp; |
struct drm_display_mode *mode0 = NULL; |
struct drm_display_mode *mode1 = NULL; |
if (rdev->mode_info.crtcs[0]->base.enabled) |
mode0 = &rdev->mode_info.crtcs[0]->base.mode; |
if (rdev->mode_info.crtcs[1]->base.enabled) |
mode1 = &rdev->mode_info.crtcs[1]->base.mode; |
/* |
* Set display0/1 priority up in the memory controller for |
* modes if the user specifies HIGH for displaypriority |
* option. |
*/ |
if (rdev->disp_priority == 2) { |
tmp = RREG32_MC(MC_MISC_LAT_TIMER); |
tmp &= ~MC_DISP1R_INIT_LAT_MASK; |
tmp &= ~MC_DISP0R_INIT_LAT_MASK; |
if (mode1) |
tmp |= (1 << MC_DISP1R_INIT_LAT_SHIFT); |
if (mode0) |
tmp |= (1 << MC_DISP0R_INIT_LAT_SHIFT); |
WREG32_MC(MC_MISC_LAT_TIMER, tmp); |
} |
rv515_bandwidth_avivo_update(rdev); |
} |
/drivers/video/drm/radeon/r600.c |
---|
67,7 → 67,7 |
"programming pipes. Bad things might happen.\n"); |
} |
tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1; |
tmp = rdev->mc.vram_location + rdev->mc.vram_size - 1; |
tmp = REG_SET(R600_MC_FB_TOP, tmp >> 24); |
tmp |= REG_SET(R600_MC_FB_BASE, rdev->mc.vram_location >> 24); |
WREG32(R600_MC_VM_FB_LOCATION, tmp); |
140,8 → 140,7 |
void r600_vram_info(struct radeon_device *rdev) |
{ |
r600_vram_get_type(rdev); |
rdev->mc.real_vram_size = RREG32(R600_CONFIG_MEMSIZE); |
rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
rdev->mc.vram_size = RREG32(R600_CONFIG_MEMSIZE); |
/* Could aper size report 0 ? */ |
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0); |
/drivers/video/drm/radeon/radeon.h |
---|
45,23 → 45,18 |
*/ |
#include <types.h> |
#include <linux/list.h> |
#include <list.h> |
#include <pci.h> |
#include <errno-base.h> |
#include "drm_edid.h" |
#include "radeon_family.h" |
#include "radeon_mode.h" |
#include "radeon_reg.h" |
#include "r300.h" |
#include <syscall.h> |
/* |
* Modules parameters. |
*/ |
extern int radeon_no_wb; |
extern int radeon_modeset; |
extern int radeon_dynclks; |
extern int radeon_r4xx_atom; |
69,62 → 64,8 |
extern int radeon_vram_limit; |
extern int radeon_gart_size; |
extern int radeon_benchmarking; |
extern int radeon_testing; |
extern int radeon_connector_table; |
extern int radeon_tv; |
static inline uint8_t __raw_readb(const volatile void __iomem *addr) |
{ |
return *(const volatile uint8_t __force *) addr; |
} |
static inline uint16_t __raw_readw(const volatile void __iomem *addr) |
{ |
return *(const volatile uint16_t __force *) addr; |
} |
static inline uint32_t __raw_readl(const volatile void __iomem *addr) |
{ |
return *(const volatile uint32_t __force *) addr; |
} |
#define readb __raw_readb |
#define readw __raw_readw |
#define readl __raw_readl |
static inline void __raw_writeb(uint8_t b, volatile void __iomem *addr) |
{ |
*(volatile uint8_t __force *) addr = b; |
} |
static inline void __raw_writew(uint16_t b, volatile void __iomem *addr) |
{ |
*(volatile uint16_t __force *) addr = b; |
} |
static inline void __raw_writel(uint32_t b, volatile void __iomem *addr) |
{ |
*(volatile uint32_t __force *) addr = b; |
} |
static inline void __raw_writeq(__u64 b, volatile void __iomem *addr) |
{ |
*(volatile __u64 *)addr = b; |
} |
#define writeb __raw_writeb |
#define writew __raw_writew |
#define writel __raw_writel |
#define writeq __raw_writeq |
//#define writeb(b,addr) *(volatile uint8_t* ) addr = (uint8_t)b |
//#define writew(b,addr) *(volatile uint16_t*) addr = (uint16_t)b |
//#define writel(b,addr) *(volatile uint32_t*) addr = (uint32_t)b |
/* |
* Copy from radeon_drv.h so we don't have to include both and have conflicting |
* symbol; |
133,8 → 74,63 |
#define RADEON_IB_POOL_SIZE 16 |
#define RADEON_DEBUGFS_MAX_NUM_FILES 32 |
#define RADEONFB_CONN_LIMIT 4 |
#define RADEON_BIOS_NUM_SCRATCH 8 |
enum radeon_family { |
CHIP_R100, |
CHIP_RV100, |
CHIP_RS100, |
CHIP_RV200, |
CHIP_RS200, |
CHIP_R200, |
CHIP_RV250, |
CHIP_RS300, |
CHIP_RV280, |
CHIP_R300, |
CHIP_R350, |
CHIP_RV350, |
CHIP_RV380, |
CHIP_R420, |
CHIP_R423, |
CHIP_RV410, |
CHIP_RS400, |
CHIP_RS480, |
CHIP_RS600, |
CHIP_RS690, |
CHIP_RS740, |
CHIP_RV515, |
CHIP_R520, |
CHIP_RV530, |
CHIP_RV560, |
CHIP_RV570, |
CHIP_R580, |
CHIP_R600, |
CHIP_RV610, |
CHIP_RV630, |
CHIP_RV620, |
CHIP_RV635, |
CHIP_RV670, |
CHIP_RS780, |
CHIP_RV770, |
CHIP_RV730, |
CHIP_RV710, |
CHIP_LAST, |
}; |
enum radeon_chip_flags { |
RADEON_FAMILY_MASK = 0x0000ffffUL, |
RADEON_FLAGS_MASK = 0xffff0000UL, |
RADEON_IS_MOBILITY = 0x00010000UL, |
RADEON_IS_IGP = 0x00020000UL, |
RADEON_SINGLE_CRTC = 0x00040000UL, |
RADEON_IS_AGP = 0x00080000UL, |
RADEON_HAS_HIERZ = 0x00100000UL, |
RADEON_IS_PCIE = 0x00200000UL, |
RADEON_NEW_MEMMAP = 0x00400000UL, |
RADEON_IS_PCI = 0x00800000UL, |
RADEON_IS_IGPGART = 0x01000000UL, |
}; |
/* |
* Errata workarounds. |
*/ |
153,21 → 149,10 |
*/ |
bool radeon_get_bios(struct radeon_device *rdev); |
/* |
* Dummy page |
* Clocks |
*/ |
struct radeon_dummy_page { |
struct page *page; |
dma_addr_t addr; |
}; |
int radeon_dummy_page_init(struct radeon_device *rdev); |
void radeon_dummy_page_fini(struct radeon_device *rdev); |
/* |
* Clocks |
*/ |
struct radeon_clock { |
struct radeon_pll p1pll; |
struct radeon_pll p2pll; |
178,7 → 163,6 |
uint32_t default_sclk; |
}; |
/* |
* Fences. |
*/ |
217,15 → 201,7 |
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence); |
void radeon_fence_unref(struct radeon_fence **fence); |
/* |
* Tiling registers |
*/ |
struct radeon_surface_reg { |
struct radeon_object *robj; |
}; |
#define RADEON_GEM_MAX_SURFACES 8 |
/* |
* Radeon buffer. |
*/ |
237,7 → 213,6 |
uint64_t gpu_offset; |
unsigned rdomain; |
unsigned wdomain; |
uint32_t tiling_flags; |
}; |
int radeon_object_init(struct radeon_device *rdev); |
319,18 → 294,11 |
resource_size_t aper_size; |
resource_size_t aper_base; |
resource_size_t agp_base; |
/* for some chips with <= 32MB we need to lie |
* about vram size near mc fb location */ |
u64 mc_vram_size; |
u64 gtt_location; |
u64 gtt_size; |
u64 gtt_start; |
u64 gtt_end; |
u64 vram_location; |
u64 vram_start; |
u64 vram_end; |
unsigned gtt_location; |
unsigned gtt_size; |
unsigned vram_location; |
unsigned vram_size; |
unsigned vram_width; |
u64 real_vram_size; |
int vram_mtrr; |
bool vram_is_ddr; |
}; |
377,10 → 345,6 |
uint32_t length_dw; |
}; |
/* |
* locking - |
* mutex protects scheduled_ibs, ready, alloc_bm |
*/ |
struct radeon_ib_pool { |
// struct mutex mutex; |
struct radeon_object *robj; |
406,16 → 370,6 |
bool ready; |
}; |
struct r600_blit { |
struct radeon_object *shader_obj; |
u64 shader_gpu_addr; |
u32 vs_offset, ps_offset; |
u32 state_offset; |
u32 state_len; |
u32 vb_used, vb_total; |
struct radeon_ib *vb_ib; |
}; |
int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib); |
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib); |
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib); |
468,7 → 422,6 |
int chunk_relocs_idx; |
struct radeon_ib *ib; |
void *track; |
unsigned family; |
}; |
struct radeon_cs_packet { |
503,38 → 456,6 |
uint64_t gpu_addr; |
}; |
/** |
* struct radeon_pm - power management datas |
* @max_bandwidth: maximum bandwidth the gpu has (MByte/s) |
* @igp_sideport_mclk: sideport memory clock Mhz (rs690,rs740,rs780,rs880) |
* @igp_system_mclk: system clock Mhz (rs690,rs740,rs780,rs880) |
* @igp_ht_link_clk: ht link clock Mhz (rs690,rs740,rs780,rs880) |
* @igp_ht_link_width: ht link width in bits (rs690,rs740,rs780,rs880) |
* @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP) |
* @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP) |
* @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP) |
* @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP) |
* @sclk: GPU clock Mhz (core bandwith depends of this clock) |
* @needed_bandwidth: current bandwidth needs |
* |
* It keeps track of various data needed to take powermanagement decision. |
* Bandwith need is used to determine minimun clock of the GPU and memory. |
* Equation between gpu/memory clock and available bandwidth is hw dependent |
* (type of memory, bus size, efficiency, ...) |
*/ |
struct radeon_pm { |
fixed20_12 max_bandwidth; |
fixed20_12 igp_sideport_mclk; |
fixed20_12 igp_system_mclk; |
fixed20_12 igp_ht_link_clk; |
fixed20_12 igp_ht_link_width; |
fixed20_12 k8_bandwidth; |
fixed20_12 sideport_bandwidth; |
fixed20_12 ht_bandwidth; |
fixed20_12 core_bandwidth; |
fixed20_12 sclk; |
fixed20_12 needed_bandwidth; |
}; |
/* |
* ASIC specific functions. |
541,19 → 462,13 |
*/ |
struct radeon_asic { |
int (*init)(struct radeon_device *rdev); |
void (*fini)(struct radeon_device *rdev); |
int (*resume)(struct radeon_device *rdev); |
int (*suspend)(struct radeon_device *rdev); |
void (*errata)(struct radeon_device *rdev); |
void (*vram_info)(struct radeon_device *rdev); |
void (*vga_set_state)(struct radeon_device *rdev, bool state); |
int (*gpu_reset)(struct radeon_device *rdev); |
int (*mc_init)(struct radeon_device *rdev); |
void (*mc_fini)(struct radeon_device *rdev); |
int (*wb_init)(struct radeon_device *rdev); |
void (*wb_fini)(struct radeon_device *rdev); |
int (*gart_init)(struct radeon_device *rdev); |
void (*gart_fini)(struct radeon_device *rdev); |
int (*gart_enable)(struct radeon_device *rdev); |
void (*gart_disable)(struct radeon_device *rdev); |
void (*gart_tlb_flush)(struct radeon_device *rdev); |
561,14 → 476,9 |
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size); |
void (*cp_fini)(struct radeon_device *rdev); |
void (*cp_disable)(struct radeon_device *rdev); |
void (*cp_commit)(struct radeon_device *rdev); |
void (*ring_start)(struct radeon_device *rdev); |
int (*ring_test)(struct radeon_device *rdev); |
void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib); |
int (*ib_test)(struct radeon_device *rdev); |
int (*irq_set)(struct radeon_device *rdev); |
int (*irq_process)(struct radeon_device *rdev); |
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc); |
void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence); |
int (*cs_parse)(struct radeon_cs_parser *p); |
int (*copy_blit)(struct radeon_device *rdev, |
590,75 → 500,14 |
void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock); |
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes); |
void (*set_clock_gating)(struct radeon_device *rdev, int enable); |
int (*set_surface_reg)(struct radeon_device *rdev, int reg, |
uint32_t tiling_flags, uint32_t pitch, |
uint32_t offset, uint32_t obj_size); |
int (*clear_surface_reg)(struct radeon_device *rdev, int reg); |
void (*bandwidth_update)(struct radeon_device *rdev); |
}; |
/* |
* Asic structures |
*/ |
struct r100_asic { |
const unsigned *reg_safe_bm; |
unsigned reg_safe_bm_size; |
}; |
struct r300_asic { |
const unsigned *reg_safe_bm; |
unsigned reg_safe_bm_size; |
}; |
struct r600_asic { |
unsigned max_pipes; |
unsigned max_tile_pipes; |
unsigned max_simds; |
unsigned max_backends; |
unsigned max_gprs; |
unsigned max_threads; |
unsigned max_stack_entries; |
unsigned max_hw_contexts; |
unsigned max_gs_threads; |
unsigned sx_max_export_size; |
unsigned sx_max_export_pos_size; |
unsigned sx_max_export_smx_size; |
unsigned sq_num_cf_insts; |
}; |
struct rv770_asic { |
unsigned max_pipes; |
unsigned max_tile_pipes; |
unsigned max_simds; |
unsigned max_backends; |
unsigned max_gprs; |
unsigned max_threads; |
unsigned max_stack_entries; |
unsigned max_hw_contexts; |
unsigned max_gs_threads; |
unsigned sx_max_export_size; |
unsigned sx_max_export_pos_size; |
unsigned sx_max_export_smx_size; |
unsigned sq_num_cf_insts; |
unsigned sx_num_of_sets; |
unsigned sc_prim_fifo_size; |
unsigned sc_hiz_tile_fifo_size; |
unsigned sc_earlyz_tile_fifo_fize; |
}; |
union radeon_asic_config { |
struct r300_asic r300; |
struct r100_asic r100; |
struct r600_asic r600; |
struct rv770_asic rv770; |
}; |
/* |
/* |
* Core structure, functions and helpers. |
*/ |
675,7 → 524,6 |
int usec_timeout; |
enum radeon_pll_errata pll_errata; |
int num_gb_pipes; |
int num_z_pipes; |
int disp_priority; |
/* BIOS */ |
uint8_t *bios; |
686,15 → 534,20 |
struct fb_info *fbdev_info; |
struct radeon_object *fbdev_robj; |
struct radeon_framebuffer *fbdev_rfb; |
/* Register mmio */ |
unsigned long rmmio_base; |
unsigned long rmmio_size; |
void *rmmio; |
radeon_rreg_t mm_rreg; |
radeon_wreg_t mm_wreg; |
radeon_rreg_t mc_rreg; |
radeon_wreg_t mc_wreg; |
radeon_rreg_t pll_rreg; |
radeon_wreg_t pll_wreg; |
uint32_t pcie_reg_mask; |
radeon_rreg_t pcie_rreg; |
radeon_wreg_t pcie_wreg; |
radeon_rreg_t pciep_rreg; |
radeon_wreg_t pciep_wreg; |
struct radeon_clock clock; |
709,21 → 562,11 |
// struct radeon_irq irq; |
struct radeon_asic *asic; |
struct radeon_gem gem; |
struct radeon_pm pm; |
uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; |
// struct mutex cs_mutex; |
struct radeon_wb wb; |
struct radeon_dummy_page dummy_page; |
bool gpu_lockup; |
bool shutdown; |
bool suspend; |
bool need_dma32; |
bool new_init_path; |
bool accel_working; |
struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES]; |
const struct firmware *me_fw; /* all family ME firmware */ |
const struct firmware *pfp_fw; /* r6/700 PFP firmware */ |
struct r600_blit r600_blit; |
}; |
int radeon_device_init(struct radeon_device *rdev, |
733,41 → 576,70 |
void radeon_device_fini(struct radeon_device *rdev); |
int radeon_gpu_wait_for_idle(struct radeon_device *rdev); |
/* r600 blit */ |
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes); |
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence); |
void r600_kms_blit_copy(struct radeon_device *rdev, |
u64 src_gpu_addr, u64 dst_gpu_addr, |
int size_bytes); |
#define __iomem |
#define __force |
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg) |
static inline uint8_t __raw_readb(const volatile void __iomem *addr) |
{ |
if (reg < 0x10000) |
return readl(((void __iomem *)rdev->rmmio) + reg); |
else { |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
return *(const volatile uint8_t __force *) addr; |
} |
static inline uint16_t __raw_readw(const volatile void __iomem *addr) |
{ |
return *(const volatile uint16_t __force *) addr; |
} |
static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
static inline uint32_t __raw_readl(const volatile void __iomem *addr) |
{ |
if (reg < 0x10000) |
writel(v, ((void __iomem *)rdev->rmmio) + reg); |
else { |
writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX); |
writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA); |
return *(const volatile uint32_t __force *) addr; |
} |
#define readb __raw_readb |
#define readw __raw_readw |
#define readl __raw_readl |
static inline void __raw_writeb(uint8_t b, volatile void __iomem *addr) |
{ |
*(volatile uint8_t __force *) addr = b; |
} |
static inline void __raw_writew(uint16_t b, volatile void __iomem *addr) |
{ |
*(volatile uint16_t __force *) addr = b; |
} |
static inline void __raw_writel(uint32_t b, volatile void __iomem *addr) |
{ |
*(volatile uint32_t __force *) addr = b; |
} |
static inline void __raw_writeq(__u64 b, volatile void __iomem *addr) |
{ |
*(volatile __u64 *)addr = b; |
} |
#define writeb __raw_writeb |
#define writew __raw_writew |
#define writel __raw_writel |
#define writeq __raw_writeq |
//#define writeb(b,addr) *(volatile uint8_t* ) addr = (uint8_t)b |
//#define writew(b,addr) *(volatile uint16_t*) addr = (uint16_t)b |
//#define writel(b,addr) *(volatile uint32_t*) addr = (uint32_t)b |
/* |
* Registers read & write functions. |
*/ |
#define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg)) |
#define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg)) |
#define RREG32(reg) r100_mm_rreg(rdev, (reg)) |
#define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v)) |
#define RREG32(reg) rdev->mm_rreg(rdev, (reg)) |
#define WREG32(reg, v) rdev->mm_wreg(rdev, (reg), (v)) |
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) |
#define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg)) |
774,8 → 646,8 |
#define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v)) |
#define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg)) |
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v)) |
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg)) |
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v)) |
#define RREG32_PCIE(reg) rdev->pcie_rreg(rdev, (reg)) |
#define WREG32_PCIE(reg, v) rdev->pcie_wreg(rdev, (reg), (v)) |
#define WREG32_P(reg, val, mask) \ |
do { \ |
uint32_t tmp_ = RREG32(reg); \ |
791,34 → 663,7 |
WREG32_PLL(reg, tmp_); \ |
} while (0) |
/* |
* Indirect registers accessor |
*/ |
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
{ |
uint32_t r; |
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
r = RREG32(RADEON_PCIE_DATA); |
return r; |
} |
static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
{ |
WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
WREG32(RADEON_PCIE_DATA, (v)); |
} |
void r100_pll_errata_after_index(struct radeon_device *rdev); |
#define radeon_PCI_IDS \ |
{0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ |
{0x1002, 0x3152, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ |
1230,8 → 1075,6 |
/* |
* ASICs helpers. |
*/ |
#define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \ |
(rdev->pdev->device == 0x5969)) |
#define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \ |
(rdev->family == CHIP_RV200) || \ |
(rdev->family == CHIP_RS100) || \ |
1332,20 → 1175,14 |
* ASICs macro. |
*/ |
#define radeon_init(rdev) (rdev)->asic->init((rdev)) |
#define radeon_fini(rdev) (rdev)->asic->fini((rdev)) |
#define radeon_resume(rdev) (rdev)->asic->resume((rdev)) |
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev)) |
#define radeon_cs_parse(p) rdev->asic->cs_parse((p)) |
#define radeon_errata(rdev) (rdev)->asic->errata((rdev)) |
#define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev)) |
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state)) |
#define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev)) |
#define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev)) |
#define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev)) |
#define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev)) |
#define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev)) |
#define radeon_gpu_gart_init(rdev) (rdev)->asic->gart_init((rdev)) |
#define radeon_gpu_gart_fini(rdev) (rdev)->asic->gart_fini((rdev)) |
#define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev)) |
#define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev)) |
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev)) |
1353,14 → 1190,9 |
#define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize)) |
#define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev)) |
#define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev)) |
#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev)) |
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev)) |
#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev)) |
#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib)) |
#define radeon_ib_test(rdev) (rdev)->asic->ib_test((rdev)) |
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev)) |
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev)) |
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc)) |
#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence)) |
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f)) |
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f)) |
1369,102 → 1201,8 |
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e)) |
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l)) |
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e)) |
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s))) |
#define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r))) |
#define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev)) |
/* Common functions */ |
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev); |
extern int radeon_modeset_init(struct radeon_device *rdev); |
extern void radeon_modeset_fini(struct radeon_device *rdev); |
extern bool radeon_card_posted(struct radeon_device *rdev); |
extern int radeon_clocks_init(struct radeon_device *rdev); |
extern void radeon_clocks_fini(struct radeon_device *rdev); |
extern void radeon_scratch_init(struct radeon_device *rdev); |
extern void radeon_surface_init(struct radeon_device *rdev); |
extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); |
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ |
struct r100_mc_save { |
u32 GENMO_WT; |
u32 CRTC_EXT_CNTL; |
u32 CRTC_GEN_CNTL; |
u32 CRTC2_GEN_CNTL; |
u32 CUR_OFFSET; |
u32 CUR2_OFFSET; |
}; |
extern void r100_cp_disable(struct radeon_device *rdev); |
extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); |
extern void r100_cp_fini(struct radeon_device *rdev); |
extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
extern int r100_pci_gart_init(struct radeon_device *rdev); |
extern void r100_pci_gart_fini(struct radeon_device *rdev); |
extern int r100_pci_gart_enable(struct radeon_device *rdev); |
extern void r100_pci_gart_disable(struct radeon_device *rdev); |
extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
extern int r100_debugfs_mc_info_init(struct radeon_device *rdev); |
extern int r100_gui_wait_for_idle(struct radeon_device *rdev); |
extern void r100_ib_fini(struct radeon_device *rdev); |
extern int r100_ib_init(struct radeon_device *rdev); |
extern void r100_irq_disable(struct radeon_device *rdev); |
extern int r100_irq_set(struct radeon_device *rdev); |
extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save); |
extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save); |
extern void r100_vram_init_sizes(struct radeon_device *rdev); |
extern void r100_wb_disable(struct radeon_device *rdev); |
extern void r100_wb_fini(struct radeon_device *rdev); |
extern int r100_wb_init(struct radeon_device *rdev); |
/* r300,r350,rv350,rv370,rv380 */ |
extern void r300_set_reg_safe(struct radeon_device *rdev); |
extern void r300_mc_program(struct radeon_device *rdev); |
extern void r300_vram_info(struct radeon_device *rdev); |
extern int rv370_pcie_gart_init(struct radeon_device *rdev); |
extern void rv370_pcie_gart_fini(struct radeon_device *rdev); |
extern int rv370_pcie_gart_enable(struct radeon_device *rdev); |
extern void rv370_pcie_gart_disable(struct radeon_device *rdev); |
/* r420,r423,rv410 */ |
extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); |
extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); |
extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); |
/* rv515 */ |
extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); |
/* rs690, rs740 */ |
extern void rs690_line_buffer_adjust(struct radeon_device *rdev, |
struct drm_display_mode *mode1, |
struct drm_display_mode *mode2); |
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ |
extern bool r600_card_posted(struct radeon_device *rdev); |
extern void r600_cp_stop(struct radeon_device *rdev); |
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size); |
extern int r600_cp_resume(struct radeon_device *rdev); |
extern int r600_count_pipe_bits(uint32_t val); |
extern int r600_gart_clear_page(struct radeon_device *rdev, int i); |
extern int r600_mc_wait_for_idle(struct radeon_device *rdev); |
extern int r600_pcie_gart_init(struct radeon_device *rdev); |
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); |
extern int r600_ib_test(struct radeon_device *rdev); |
extern int r600_ring_test(struct radeon_device *rdev); |
extern int r600_wb_init(struct radeon_device *rdev); |
extern void r600_wb_fini(struct radeon_device *rdev); |
extern void r600_scratch_init(struct radeon_device *rdev); |
extern int r600_blit_init(struct radeon_device *rdev); |
extern void r600_blit_fini(struct radeon_device *rdev); |
extern int r600_cp_init_microcode(struct radeon_device *rdev); |
extern int r600_gpu_reset(struct radeon_device *rdev); |
#define DRM_UDELAY(d) udelay(d) |
resource_size_t |
1474,6 → 1212,4 |
bool set_mode(struct drm_device *dev, int width, int height); |
#endif |
/drivers/video/drm/radeon/radeon_asic.h |
---|
42,21 → 42,16 |
* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 |
*/ |
int r100_init(struct radeon_device *rdev); |
int r200_init(struct radeon_device *rdev); |
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg); |
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
void r100_errata(struct radeon_device *rdev); |
void r100_vram_info(struct radeon_device *rdev); |
void r100_vga_set_state(struct radeon_device *rdev, bool state); |
int r100_gpu_reset(struct radeon_device *rdev); |
int r100_mc_init(struct radeon_device *rdev); |
void r100_mc_fini(struct radeon_device *rdev); |
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc); |
int r100_wb_init(struct radeon_device *rdev); |
void r100_wb_fini(struct radeon_device *rdev); |
int r100_pci_gart_init(struct radeon_device *rdev); |
void r100_pci_gart_fini(struct radeon_device *rdev); |
int r100_pci_gart_enable(struct radeon_device *rdev); |
int r100_gart_enable(struct radeon_device *rdev); |
void r100_pci_gart_disable(struct radeon_device *rdev); |
void r100_pci_gart_tlb_flush(struct radeon_device *rdev); |
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
63,7 → 58,6 |
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size); |
void r100_cp_fini(struct radeon_device *rdev); |
void r100_cp_disable(struct radeon_device *rdev); |
void r100_cp_commit(struct radeon_device *rdev); |
void r100_ring_start(struct radeon_device *rdev); |
int r100_irq_set(struct radeon_device *rdev); |
int r100_irq_process(struct radeon_device *rdev); |
77,15 → 71,8 |
uint64_t dst_offset, |
unsigned num_pages, |
struct radeon_fence *fence); |
int r100_set_surface_reg(struct radeon_device *rdev, int reg, |
uint32_t tiling_flags, uint32_t pitch, |
uint32_t offset, uint32_t obj_size); |
int r100_clear_surface_reg(struct radeon_device *rdev, int reg); |
void r100_bandwidth_update(struct radeon_device *rdev); |
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); |
int r100_ib_test(struct radeon_device *rdev); |
int r100_ring_test(struct radeon_device *rdev); |
static struct radeon_asic r100_asic = { |
.init = &r100_init, |
.errata = &r100_errata, |
95,7 → 82,7 |
.mc_fini = &r100_mc_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_enable = &r100_pci_gart_enable, |
.gart_enable = &r100_gart_enable, |
.gart_disable = &r100_pci_gart_disable, |
.gart_tlb_flush = &r100_pci_gart_tlb_flush, |
.gart_set_page = &r100_pci_gart_set_page, |
114,9 → 101,6 |
// .set_memory_clock = NULL, |
// .set_pcie_lanes = NULL, |
// .set_clock_gating = &radeon_legacy_set_clock_gating, |
.set_surface_reg = r100_set_surface_reg, |
.clear_surface_reg = r100_clear_surface_reg, |
.bandwidth_update = &r100_bandwidth_update, |
}; |
133,9 → 117,7 |
void r300_fence_ring_emit(struct radeon_device *rdev, |
struct radeon_fence *fence); |
int r300_cs_parse(struct radeon_cs_parser *p); |
int rv370_pcie_gart_init(struct radeon_device *rdev); |
void rv370_pcie_gart_fini(struct radeon_device *rdev); |
int rv370_pcie_gart_enable(struct radeon_device *rdev); |
int r300_gart_enable(struct radeon_device *rdev); |
void rv370_pcie_gart_disable(struct radeon_device *rdev); |
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev); |
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
148,17 → 130,17 |
unsigned num_pages, |
struct radeon_fence *fence); |
static struct radeon_asic r300_asic = { |
.init = &r300_init, |
.errata = &r300_errata, |
.vram_info = &r300_vram_info, |
.vga_set_state = &r100_vga_set_state, |
.gpu_reset = &r300_gpu_reset, |
.mc_init = &r300_mc_init, |
.mc_fini = &r300_mc_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_enable = &r100_pci_gart_enable, |
.gart_enable = &r300_gart_enable, |
.gart_disable = &r100_pci_gart_disable, |
.gart_tlb_flush = &r100_pci_gart_tlb_flush, |
.gart_set_page = &r100_pci_gart_set_page, |
177,38 → 159,32 |
// .set_memory_clock = NULL, |
// .set_pcie_lanes = &rv370_set_pcie_lanes, |
// .set_clock_gating = &radeon_legacy_set_clock_gating, |
.set_surface_reg = r100_set_surface_reg, |
.clear_surface_reg = r100_clear_surface_reg, |
.bandwidth_update = &r100_bandwidth_update, |
}; |
/* |
* r420,r423,rv410 |
*/ |
extern int r420_init(struct radeon_device *rdev); |
extern void r420_fini(struct radeon_device *rdev); |
extern int r420_suspend(struct radeon_device *rdev); |
extern int r420_resume(struct radeon_device *rdev); |
void r420_errata(struct radeon_device *rdev); |
void r420_vram_info(struct radeon_device *rdev); |
int r420_mc_init(struct radeon_device *rdev); |
void r420_mc_fini(struct radeon_device *rdev); |
static struct radeon_asic r420_asic = { |
.init = &r420_init, |
.fini = &r420_fini, |
.suspend = &r420_suspend, |
.resume = &r420_resume, |
.errata = NULL, |
.vram_info = NULL, |
.vga_set_state = &r100_vga_set_state, |
.init = &r300_init, |
.errata = &r420_errata, |
.vram_info = &r420_vram_info, |
.gpu_reset = &r300_gpu_reset, |
.mc_init = NULL, |
.mc_fini = NULL, |
.wb_init = NULL, |
.wb_fini = NULL, |
.gart_enable = NULL, |
.gart_disable = NULL, |
.mc_init = &r420_mc_init, |
.mc_fini = &r420_mc_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_enable = &r300_gart_enable, |
.gart_disable = &rv370_pcie_gart_disable, |
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
.gart_set_page = &rv370_pcie_gart_set_page, |
.cp_init = NULL, |
.cp_fini = NULL, |
.cp_disable = NULL, |
.cp_init = &r100_cp_init, |
// .cp_fini = &r100_cp_fini, |
// .cp_disable = &r100_cp_disable, |
.ring_start = &r300_ring_start, |
// .irq_set = &r100_irq_set, |
// .irq_process = &r100_irq_process, |
221,9 → 197,6 |
// .set_memory_clock = &radeon_atom_set_memory_clock, |
// .set_pcie_lanes = &rv370_set_pcie_lanes, |
// .set_clock_gating = &radeon_atom_set_clock_gating, |
.set_surface_reg = r100_set_surface_reg, |
.clear_surface_reg = r100_clear_surface_reg, |
.bandwidth_update = &r100_bandwidth_update, |
}; |
234,8 → 207,6 |
void rs400_vram_info(struct radeon_device *rdev); |
int rs400_mc_init(struct radeon_device *rdev); |
void rs400_mc_fini(struct radeon_device *rdev); |
int rs400_gart_init(struct radeon_device *rdev); |
void rs400_gart_fini(struct radeon_device *rdev); |
int rs400_gart_enable(struct radeon_device *rdev); |
void rs400_gart_disable(struct radeon_device *rdev); |
void rs400_gart_tlb_flush(struct radeon_device *rdev); |
246,14 → 217,11 |
.init = &r300_init, |
.errata = &rs400_errata, |
.vram_info = &rs400_vram_info, |
.vga_set_state = &r100_vga_set_state, |
.gpu_reset = &r300_gpu_reset, |
.mc_init = &rs400_mc_init, |
.mc_fini = &rs400_mc_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_init = &rs400_gart_init, |
.gart_fini = &rs400_gart_fini, |
.gart_enable = &rs400_gart_enable, |
.gart_disable = &rs400_gart_disable, |
.gart_tlb_flush = &rs400_gart_tlb_flush, |
261,7 → 229,6 |
.cp_init = &r100_cp_init, |
// .cp_fini = &r100_cp_fini, |
// .cp_disable = &r100_cp_disable, |
.cp_commit = &r100_cp_commit, |
.ring_start = &r300_ring_start, |
// .irq_set = &r100_irq_set, |
// .irq_process = &r100_irq_process, |
274,9 → 241,6 |
// .set_memory_clock = NULL, |
// .set_pcie_lanes = NULL, |
// .set_clock_gating = &radeon_legacy_set_clock_gating, |
.set_surface_reg = r100_set_surface_reg, |
.clear_surface_reg = r100_clear_surface_reg, |
.bandwidth_update = &r100_bandwidth_update, |
}; |
283,16 → 247,11 |
/* |
* rs600. |
*/ |
int rs600_init(struct radeon_device *rdev); |
void rs600_errata(struct radeon_device *rdev); |
void rs600_vram_info(struct radeon_device *rdev); |
int rs600_mc_init(struct radeon_device *rdev); |
void rs600_mc_fini(struct radeon_device *rdev); |
int rs600_irq_set(struct radeon_device *rdev); |
int rs600_irq_process(struct radeon_device *rdev); |
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc); |
int rs600_gart_init(struct radeon_device *rdev); |
void rs600_gart_fini(struct radeon_device *rdev); |
int rs600_gart_enable(struct radeon_device *rdev); |
void rs600_gart_disable(struct radeon_device *rdev); |
void rs600_gart_tlb_flush(struct radeon_device *rdev); |
299,19 → 258,16 |
int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr); |
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
void rs600_bandwidth_update(struct radeon_device *rdev); |
static struct radeon_asic rs600_asic = { |
.init = &rs600_init, |
.init = &r300_init, |
.errata = &rs600_errata, |
.vram_info = &rs600_vram_info, |
.vga_set_state = &r100_vga_set_state, |
.gpu_reset = &r300_gpu_reset, |
.mc_init = &rs600_mc_init, |
.mc_fini = &rs600_mc_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_init = &rs600_gart_init, |
.gart_fini = &rs600_gart_fini, |
.gart_enable = &rs600_gart_enable, |
.gart_disable = &rs600_gart_disable, |
.gart_tlb_flush = &rs600_gart_tlb_flush, |
319,7 → 275,6 |
.cp_init = &r100_cp_init, |
// .cp_fini = &r100_cp_fini, |
// .cp_disable = &r100_cp_disable, |
.cp_commit = &r100_cp_commit, |
.ring_start = &r300_ring_start, |
// .irq_set = &rs600_irq_set, |
// .irq_process = &r100_irq_process, |
332,7 → 287,6 |
// .set_memory_clock = &radeon_atom_set_memory_clock, |
// .set_pcie_lanes = NULL, |
// .set_clock_gating = &radeon_atom_set_clock_gating, |
.bandwidth_update = &rs600_bandwidth_update, |
}; |
345,19 → 299,15 |
void rs690_mc_fini(struct radeon_device *rdev); |
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg); |
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
void rs690_bandwidth_update(struct radeon_device *rdev); |
static struct radeon_asic rs690_asic = { |
.init = &rs600_init, |
.init = &r300_init, |
.errata = &rs690_errata, |
.vram_info = &rs690_vram_info, |
.vga_set_state = &r100_vga_set_state, |
.gpu_reset = &r300_gpu_reset, |
.mc_init = &rs690_mc_init, |
.mc_fini = &rs690_mc_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_init = &rs400_gart_init, |
.gart_fini = &rs400_gart_fini, |
.gart_enable = &rs400_gart_enable, |
.gart_disable = &rs400_gart_disable, |
.gart_tlb_flush = &rs400_gart_tlb_flush, |
365,7 → 315,6 |
.cp_init = &r100_cp_init, |
// .cp_fini = &r100_cp_fini, |
// .cp_disable = &r100_cp_disable, |
.cp_commit = &r100_cp_commit, |
.ring_start = &r300_ring_start, |
// .irq_set = &rs600_irq_set, |
// .irq_process = &r100_irq_process, |
378,12 → 327,8 |
// .set_memory_clock = &radeon_atom_set_memory_clock, |
// .set_pcie_lanes = NULL, |
// .set_clock_gating = &radeon_atom_set_clock_gating, |
.set_surface_reg = r100_set_surface_reg, |
.clear_surface_reg = r100_clear_surface_reg, |
.bandwidth_update = &rs690_bandwidth_update, |
}; |
/* |
* rv515 |
*/ |
398,20 → 343,18 |
void rv515_ring_start(struct radeon_device *rdev); |
uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); |
void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
void rv515_bandwidth_update(struct radeon_device *rdev); |
static struct radeon_asic rv515_asic = { |
.init = &rv515_init, |
.errata = &rv515_errata, |
.vram_info = &rv515_vram_info, |
.vga_set_state = &r100_vga_set_state, |
.gpu_reset = &rv515_gpu_reset, |
.mc_init = &rv515_mc_init, |
.mc_fini = &rv515_mc_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_init = &rv370_pcie_gart_init, |
.gart_fini = &rv370_pcie_gart_fini, |
.gart_enable = &rv370_pcie_gart_enable, |
.gart_enable = &r300_gart_enable, |
.gart_disable = &rv370_pcie_gart_disable, |
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
.gart_set_page = &rv370_pcie_gart_set_page, |
418,7 → 361,6 |
.cp_init = &r100_cp_init, |
// .cp_fini = &r100_cp_fini, |
// .cp_disable = &r100_cp_disable, |
.cp_commit = &r100_cp_commit, |
.ring_start = &rv515_ring_start, |
// .irq_set = &r100_irq_set, |
// .irq_process = &r100_irq_process, |
431,9 → 373,6 |
// .set_memory_clock = &radeon_atom_set_memory_clock, |
// .set_pcie_lanes = &rv370_set_pcie_lanes, |
// .set_clock_gating = &radeon_atom_set_clock_gating, |
.set_surface_reg = r100_set_surface_reg, |
.clear_surface_reg = r100_clear_surface_reg, |
.bandwidth_update = &rv515_bandwidth_update, |
}; |
444,20 → 383,17 |
void r520_vram_info(struct radeon_device *rdev); |
int r520_mc_init(struct radeon_device *rdev); |
void r520_mc_fini(struct radeon_device *rdev); |
void r520_bandwidth_update(struct radeon_device *rdev); |
static struct radeon_asic r520_asic = { |
.init = &rv515_init, |
.errata = &r520_errata, |
.vram_info = &r520_vram_info, |
.vga_set_state = &r100_vga_set_state, |
.gpu_reset = &rv515_gpu_reset, |
.mc_init = &r520_mc_init, |
.mc_fini = &r520_mc_fini, |
// .wb_init = &r100_wb_init, |
// .wb_fini = &r100_wb_fini, |
.gart_init = &rv370_pcie_gart_init, |
.gart_fini = &rv370_pcie_gart_fini, |
.gart_enable = &rv370_pcie_gart_enable, |
.gart_enable = &r300_gart_enable, |
.gart_disable = &rv370_pcie_gart_disable, |
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush, |
.gart_set_page = &rv370_pcie_gart_set_page, |
464,7 → 400,6 |
.cp_init = &r100_cp_init, |
// .cp_fini = &r100_cp_fini, |
// .cp_disable = &r100_cp_disable, |
.cp_commit = &r100_cp_commit, |
.ring_start = &rv515_ring_start, |
// .irq_set = &r100_irq_set, |
// .irq_process = &r100_irq_process, |
477,23 → 412,11 |
// .set_memory_clock = &radeon_atom_set_memory_clock, |
// .set_pcie_lanes = &rv370_set_pcie_lanes, |
// .set_clock_gating = &radeon_atom_set_clock_gating, |
.set_surface_reg = r100_set_surface_reg, |
.clear_surface_reg = r100_clear_surface_reg, |
.bandwidth_update = &r520_bandwidth_update, |
}; |
/* |
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rv770,rv730,rv710 |
*/ |
int r600_init(struct radeon_device *rdev); |
void r600_fini(struct radeon_device *rdev); |
int r600_suspend(struct radeon_device *rdev); |
int r600_resume(struct radeon_device *rdev); |
void r600_vga_set_state(struct radeon_device *rdev, bool state); |
int r600_wb_init(struct radeon_device *rdev); |
void r600_wb_fini(struct radeon_device *rdev); |
void r600_cp_commit(struct radeon_device *rdev); |
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev); |
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg); |
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); |
/drivers/video/drm/radeon/radeon_atombios.c |
---|
103,8 → 103,7 |
static bool radeon_atom_apply_quirks(struct drm_device *dev, |
uint32_t supported_device, |
int *connector_type, |
struct radeon_i2c_bus_rec *i2c_bus, |
uint16_t *line_mux) |
struct radeon_i2c_bus_rec *i2c_bus) |
{ |
/* Asus M2A-VM HDMI board lists the DVI port as HDMI */ |
128,11 → 127,9 |
if ((dev->pdev->device == 0x5653) && |
(dev->pdev->subsystem_vendor == 0x1462) && |
(dev->pdev->subsystem_device == 0x0291)) { |
if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { |
if (*connector_type == DRM_MODE_CONNECTOR_LVDS) |
i2c_bus->valid = false; |
*line_mux = 53; |
} |
} |
/* Funky macbooks */ |
if ((dev->pdev->device == 0x71C5) && |
143,34 → 140,23 |
return false; |
} |
/* some BIOSes seem to report DAC on HDMI - they hurt me with their lies */ |
if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) || |
(*connector_type == DRM_MODE_CONNECTOR_HDMIB)) { |
if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) { |
return false; |
} |
} |
/* ASUS HD 3600 XT board lists the DVI port as HDMI */ |
if ((dev->pdev->device == 0x9598) && |
(dev->pdev->subsystem_vendor == 0x1043) && |
(dev->pdev->subsystem_device == 0x01da)) { |
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { |
*connector_type = DRM_MODE_CONNECTOR_DVII; |
if (*connector_type == DRM_MODE_CONNECTOR_HDMIB) { |
*connector_type = DRM_MODE_CONNECTOR_DVID; |
} |
} |
/* ASUS HD 3450 board lists the DVI port as HDMI */ |
if ((dev->pdev->device == 0x95C5) && |
(dev->pdev->subsystem_vendor == 0x1043) && |
(dev->pdev->subsystem_device == 0x01e2)) { |
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { |
*connector_type = DRM_MODE_CONNECTOR_DVII; |
} |
} |
/* some BIOSes seem to report DAC on HDMI - usually this is a board with |
* HDMI + VGA reporting as HDMI |
*/ |
if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { |
if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) { |
*connector_type = DRM_MODE_CONNECTOR_VGA; |
*line_mux = 0; |
} |
} |
return true; |
} |
203,11 → 189,11 |
DRM_MODE_CONNECTOR_Composite, |
DRM_MODE_CONNECTOR_SVIDEO, |
DRM_MODE_CONNECTOR_Unknown, |
DRM_MODE_CONNECTOR_Unknown, |
DRM_MODE_CONNECTOR_9PinDIN, |
DRM_MODE_CONNECTOR_Unknown, |
DRM_MODE_CONNECTOR_HDMIA, |
DRM_MODE_CONNECTOR_HDMIB, |
DRM_MODE_CONNECTOR_HDMIB, |
DRM_MODE_CONNECTOR_LVDS, |
DRM_MODE_CONNECTOR_9PinDIN, |
DRM_MODE_CONNECTOR_Unknown, |
229,7 → 215,7 |
ATOM_OBJECT_HEADER *obj_header; |
int i, j, path_size, device_support; |
int connector_type; |
uint16_t igp_lane_info, conn_id; |
uint16_t igp_lane_info; |
bool linkb; |
struct radeon_i2c_bus_rec ddc_bus; |
381,6 → 367,10 |
&& record-> |
ucRecordType <= |
ATOM_MAX_OBJECT_RECORD_NUMBER) { |
DRM_ERROR |
("record type %d\n", |
record-> |
ucRecordType); |
switch (record-> |
ucRecordType) { |
case ATOM_I2C_RECORD_TYPE: |
416,16 → 406,10 |
else |
ddc_bus = radeon_lookup_gpio(dev, line_mux); |
conn_id = le16_to_cpu(path->usConnObjectId); |
if (!radeon_atom_apply_quirks |
(dev, le16_to_cpu(path->usDeviceTag), &connector_type, |
&ddc_bus, &conn_id)) |
continue; |
radeon_add_atom_connector(dev, |
conn_id, |
le16_to_cpu(path-> |
usConnObjectId), |
le16_to_cpu(path-> |
usDeviceTag), |
connector_type, &ddc_bus, |
linkb, igp_lane_info); |
440,7 → 424,7 |
struct bios_connector { |
bool valid; |
uint16_t line_mux; |
uint8_t line_mux; |
uint16_t devices; |
int connector_type; |
struct radeon_i2c_bus_rec ddc_bus; |
484,6 → 468,11 |
continue; |
} |
if (i == ATOM_DEVICE_TV1_INDEX) { |
DRM_DEBUG("Skipping TV Out\n"); |
continue; |
} |
bios_connectors[i].connector_type = |
supported_devices_connector_convert[ci.sucConnectorInfo. |
sbfAccess. |
537,7 → 526,7 |
if (!radeon_atom_apply_quirks |
(dev, (1 << i), &bios_connectors[i].connector_type, |
&bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux)) |
&bios_connectors[i].ddc_bus)) |
continue; |
bios_connectors[i].valid = true; |
719,8 → 708,9 |
return false; |
} |
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, |
struct radeon_encoder_int_tmds *tmds) |
struct radeon_encoder_int_tmds *radeon_atombios_get_tmds_info(struct |
radeon_encoder |
*encoder) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct radeon_device *rdev = dev->dev_private; |
731,6 → 721,7 |
uint8_t frev, crev; |
uint16_t maxfreq; |
int i; |
struct radeon_encoder_int_tmds *tmds = NULL; |
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, |
&crev, &data_offset); |
740,6 → 731,12 |
data_offset); |
if (tmds_info) { |
tmds = |
kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); |
if (!tmds) |
return NULL; |
maxfreq = le16_to_cpu(tmds_info->usMaxFrequency); |
for (i = 0; i < 4; i++) { |
tmds->tmds_pll[i].freq = |
765,9 → 762,8 |
break; |
} |
} |
return true; |
} |
return false; |
return tmds; |
} |
union lvds_info { |
859,72 → 855,6 |
return p_dac; |
} |
bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, |
SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION *crtc_timing, |
int32_t *pixel_clock) |
{ |
struct radeon_mode_info *mode_info = &rdev->mode_info; |
ATOM_ANALOG_TV_INFO *tv_info; |
ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2; |
ATOM_DTD_FORMAT *dtd_timings; |
int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); |
u8 frev, crev; |
uint16_t data_offset; |
atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset); |
switch (crev) { |
case 1: |
tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); |
if (index > MAX_SUPPORTED_TV_TIMING) |
return false; |
crtc_timing->usH_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total); |
crtc_timing->usH_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp); |
crtc_timing->usH_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart); |
crtc_timing->usH_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth); |
crtc_timing->usV_Total = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total); |
crtc_timing->usV_Disp = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp); |
crtc_timing->usV_SyncStart = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart); |
crtc_timing->usV_SyncWidth = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth); |
crtc_timing->susModeMiscInfo = tv_info->aModeTimings[index].susModeMiscInfo; |
crtc_timing->ucOverscanRight = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanRight); |
crtc_timing->ucOverscanLeft = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanLeft); |
crtc_timing->ucOverscanBottom = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanBottom); |
crtc_timing->ucOverscanTop = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_OverscanTop); |
*pixel_clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10; |
if (index == 1) { |
/* PAL timings appear to have wrong values for totals */ |
crtc_timing->usH_Total -= 1; |
crtc_timing->usV_Total -= 1; |
} |
break; |
case 2: |
tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset); |
if (index > MAX_SUPPORTED_TV_TIMING_V1_2) |
return false; |
dtd_timings = &tv_info_v1_2->aModeTimings[index]; |
crtc_timing->usH_Total = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHBlanking_Time); |
crtc_timing->usH_Disp = le16_to_cpu(dtd_timings->usHActive); |
crtc_timing->usH_SyncStart = le16_to_cpu(dtd_timings->usHActive) + le16_to_cpu(dtd_timings->usHSyncOffset); |
crtc_timing->usH_SyncWidth = le16_to_cpu(dtd_timings->usHSyncWidth); |
crtc_timing->usV_Total = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVBlanking_Time); |
crtc_timing->usV_Disp = le16_to_cpu(dtd_timings->usVActive); |
crtc_timing->usV_SyncStart = le16_to_cpu(dtd_timings->usVActive) + le16_to_cpu(dtd_timings->usVSyncOffset); |
crtc_timing->usV_SyncWidth = le16_to_cpu(dtd_timings->usVSyncWidth); |
crtc_timing->susModeMiscInfo.usAccess = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess); |
*pixel_clock = le16_to_cpu(dtd_timings->usPixClk) * 10; |
break; |
} |
return true; |
} |
struct radeon_encoder_tv_dac * |
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) |
{ |
1015,10 → 945,10 |
uint32_t bios_2_scratch, bios_6_scratch; |
if (rdev->family >= CHIP_R600) { |
bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH); |
bios_2_scratch = RREG32(R600_BIOS_0_SCRATCH); |
bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH); |
} else { |
bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH); |
bios_2_scratch = RREG32(RADEON_BIOS_0_SCRATCH); |
bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); |
} |
1038,34 → 968,6 |
} |
void radeon_save_bios_scratch_regs(struct radeon_device *rdev) |
{ |
uint32_t scratch_reg; |
int i; |
if (rdev->family >= CHIP_R600) |
scratch_reg = R600_BIOS_0_SCRATCH; |
else |
scratch_reg = RADEON_BIOS_0_SCRATCH; |
for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++) |
rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4)); |
} |
void radeon_restore_bios_scratch_regs(struct radeon_device *rdev) |
{ |
uint32_t scratch_reg; |
int i; |
if (rdev->family >= CHIP_R600) |
scratch_reg = R600_BIOS_0_SCRATCH; |
else |
scratch_reg = RADEON_BIOS_0_SCRATCH; |
for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++) |
WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]); |
} |
void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) |
{ |
struct drm_device *dev = encoder->dev; |
/drivers/video/drm/radeon/radeon_clocks.c |
---|
102,12 → 102,10 |
p1pll->reference_div = 12; |
if (p2pll->reference_div < 2) |
p2pll->reference_div = 12; |
if (rdev->family < CHIP_RS600) { |
if (spll->reference_div < 2) |
spll->reference_div = |
RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & |
RADEON_M_SPLL_REF_DIV_MASK; |
} |
if (mpll->reference_div < 2) |
mpll->reference_div = spll->reference_div; |
} else { |
/drivers/video/drm/radeon/radeon_fb.c |
---|
27,8 → 27,16 |
* Modularization |
*/ |
#include <linux/module.h> |
#include <linux/fb.h> |
//#include <linux/module.h> |
//#include <linux/kernel.h> |
//#include <linux/errno.h> |
//#include <linux/string.h> |
//#include <linux/mm.h> |
//#include <linux/tty.h> |
//#include <linux/slab.h> |
//#include <linux/delay.h> |
//#include <linux/fb.h> |
//#include <linux/init.h> |
#include "drmP.h" |
#include "drm.h" |
37,29 → 45,559 |
#include "radeon_drm.h" |
#include "radeon.h" |
#include "drm_fb_helper.h" |
#include <drm_mm.h> |
#include "radeon_object.h" |
struct fb_info *framebuffer_alloc(size_t size); |
#define FB_TYPE_PACKED_PIXELS 0 /* Packed Pixels */ |
#define FB_VISUAL_TRUECOLOR 2 /* True color */ |
struct fb_fix_screeninfo { |
char id[16]; /* identification string eg "TT Builtin" */ |
unsigned long smem_start; /* Start of frame buffer mem */ |
/* (physical address) */ |
__u32 smem_len; /* Length of frame buffer mem */ |
__u32 type; /* see FB_TYPE_* */ |
__u32 type_aux; /* Interleave for interleaved Planes */ |
__u32 visual; /* see FB_VISUAL_* */ |
__u16 xpanstep; /* zero if no hardware panning */ |
__u16 ypanstep; /* zero if no hardware panning */ |
__u16 ywrapstep; /* zero if no hardware ywrap */ |
__u32 line_length; /* length of a line in bytes */ |
unsigned long mmio_start; /* Start of Memory Mapped I/O */ |
/* (physical address) */ |
__u32 mmio_len; /* Length of Memory Mapped I/O */ |
__u32 accel; /* Indicate to driver which */ |
/* specific chip/card we have */ |
__u16 reserved[3]; /* Reserved for future compatibility */ |
}; |
struct fb_bitfield { |
__u32 offset; /* beginning of bitfield */ |
__u32 length; /* length of bitfield */ |
__u32 msb_right; /* != 0 : Most significant bit is */ |
/* right */ |
}; |
struct fb_var_screeninfo { |
__u32 xres; /* visible resolution */ |
__u32 yres; |
__u32 xres_virtual; /* virtual resolution */ |
__u32 yres_virtual; |
__u32 xoffset; /* offset from virtual to visible */ |
__u32 yoffset; /* resolution */ |
__u32 bits_per_pixel; /* guess what */ |
__u32 grayscale; /* != 0 Graylevels instead of colors */ |
struct fb_bitfield red; /* bitfield in fb mem if true color, */ |
struct fb_bitfield green; /* else only length is significant */ |
struct fb_bitfield blue; |
struct fb_bitfield transp; /* transparency */ |
__u32 nonstd; /* != 0 Non standard pixel format */ |
__u32 activate; /* see FB_ACTIVATE_* */ |
__u32 height; /* height of picture in mm */ |
__u32 width; /* width of picture in mm */ |
__u32 accel_flags; /* (OBSOLETE) see fb_info.flags */ |
/* Timing: All values in pixclocks, except pixclock (of course) */ |
__u32 pixclock; /* pixel clock in ps (pico seconds) */ |
__u32 left_margin; /* time from sync to picture */ |
__u32 right_margin; /* time from picture to sync */ |
__u32 upper_margin; /* time from sync to picture */ |
__u32 lower_margin; |
__u32 hsync_len; /* length of horizontal sync */ |
__u32 vsync_len; /* length of vertical sync */ |
__u32 sync; /* see FB_SYNC_* */ |
__u32 vmode; /* see FB_VMODE_* */ |
__u32 rotate; /* angle we rotate counter clockwise */ |
__u32 reserved[5]; /* Reserved for future compatibility */ |
}; |
struct fb_chroma { |
__u32 redx; /* in fraction of 1024 */ |
__u32 greenx; |
__u32 bluex; |
__u32 whitex; |
__u32 redy; |
__u32 greeny; |
__u32 bluey; |
__u32 whitey; |
}; |
struct fb_videomode { |
const char *name; /* optional */ |
u32 refresh; /* optional */ |
u32 xres; |
u32 yres; |
u32 pixclock; |
u32 left_margin; |
u32 right_margin; |
u32 upper_margin; |
u32 lower_margin; |
u32 hsync_len; |
u32 vsync_len; |
u32 sync; |
u32 vmode; |
u32 flag; |
}; |
struct fb_monspecs { |
struct fb_chroma chroma; |
struct fb_videomode *modedb; /* mode database */ |
__u8 manufacturer[4]; /* Manufacturer */ |
__u8 monitor[14]; /* Monitor String */ |
__u8 serial_no[14]; /* Serial Number */ |
__u8 ascii[14]; /* ? */ |
__u32 modedb_len; /* mode database length */ |
__u32 model; /* Monitor Model */ |
__u32 serial; /* Serial Number - Integer */ |
__u32 year; /* Year manufactured */ |
__u32 week; /* Week Manufactured */ |
__u32 hfmin; /* hfreq lower limit (Hz) */ |
__u32 hfmax; /* hfreq upper limit (Hz) */ |
__u32 dclkmin; /* pixelclock lower limit (Hz) */ |
__u32 dclkmax; /* pixelclock upper limit (Hz) */ |
__u16 input; /* display type - see FB_DISP_* */ |
__u16 dpms; /* DPMS support - see FB_DPMS_ */ |
__u16 signal; /* Signal Type - see FB_SIGNAL_* */ |
__u16 vfmin; /* vfreq lower limit (Hz) */ |
__u16 vfmax; /* vfreq upper limit (Hz) */ |
__u16 gamma; /* Gamma - in fractions of 100 */ |
__u16 gtf : 1; /* supports GTF */ |
__u16 misc; /* Misc flags - see FB_MISC_* */ |
__u8 version; /* EDID version... */ |
__u8 revision; /* ...and revision */ |
__u8 max_x; /* Maximum horizontal size (cm) */ |
__u8 max_y; /* Maximum vertical size (cm) */ |
}; |
struct fb_info { |
int node; |
int flags; |
// struct mutex lock; /* Lock for open/release/ioctl funcs */ |
// struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ |
struct fb_var_screeninfo var; /* Current var */ |
struct fb_fix_screeninfo fix; /* Current fix */ |
struct fb_monspecs monspecs; /* Current Monitor specs */ |
// struct work_struct queue; /* Framebuffer event queue */ |
// struct fb_pixmap pixmap; /* Image hardware mapper */ |
// struct fb_pixmap sprite; /* Cursor hardware mapper */ |
// struct fb_cmap cmap; /* Current cmap */ |
struct list_head modelist; /* mode list */ |
struct fb_videomode *mode; /* current mode */ |
#ifdef CONFIG_FB_BACKLIGHT |
/* assigned backlight device */ |
/* set before framebuffer registration, |
remove after unregister */ |
struct backlight_device *bl_dev; |
/* Backlight level curve */ |
struct mutex bl_curve_mutex; |
u8 bl_curve[FB_BACKLIGHT_LEVELS]; |
#endif |
#ifdef CONFIG_FB_DEFERRED_IO |
struct delayed_work deferred_work; |
struct fb_deferred_io *fbdefio; |
#endif |
struct fb_ops *fbops; |
// struct device *device; /* This is the parent */ |
// struct device *dev; /* This is this fb device */ |
int class_flag; /* private sysfs flags */ |
#ifdef CONFIG_FB_TILEBLITTING |
struct fb_tile_ops *tileops; /* Tile Blitting */ |
#endif |
char __iomem *screen_base; /* Virtual address */ |
unsigned long screen_size; /* Amount of ioremapped VRAM or 0 */ |
void *pseudo_palette; /* Fake palette of 16 colors */ |
#define FBINFO_STATE_RUNNING 0 |
#define FBINFO_STATE_SUSPENDED 1 |
u32 state; /* Hardware state i.e suspend */ |
void *fbcon_par; /* fbcon use-only private area */ |
/* From here on everything is device dependent */ |
void *par; |
/* we need the PCI or similiar aperture base/size not |
smem_start/size as smem_start may just be an object |
allocated inside the aperture so may not actually overlap */ |
resource_size_t aperture_base; |
resource_size_t aperture_size; |
}; |
struct radeon_fb_device { |
struct drm_fb_helper helper; |
struct radeon_device *rdev; |
struct drm_display_mode *mode; |
struct radeon_framebuffer *rfb; |
struct radeon_device *rdev; |
int crtc_count; |
/* crtc currently bound to this */ |
uint32_t crtc_ids[2]; |
}; |
int radeon_gem_fb_object_create(struct radeon_device *rdev, int size, |
int alignment, int initial_domain, |
bool discardable, bool kernel, |
bool interruptible, |
struct drm_gem_object **obj); |
struct fb_info *framebuffer_alloc(size_t size); |
#if 0 |
static int radeonfb_setcolreg(unsigned regno, |
unsigned red, |
unsigned green, |
unsigned blue, |
unsigned transp, |
struct fb_info *info) |
{ |
struct radeon_fb_device *rfbdev = info->par; |
struct drm_device *dev = rfbdev->rdev->ddev; |
struct drm_crtc *crtc; |
int i; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
struct drm_mode_set *modeset = &radeon_crtc->mode_set; |
struct drm_framebuffer *fb = modeset->fb; |
for (i = 0; i < rfbdev->crtc_count; i++) { |
if (crtc->base.id == rfbdev->crtc_ids[i]) { |
break; |
} |
} |
if (i == rfbdev->crtc_count) { |
continue; |
} |
if (regno > 255) { |
return 1; |
} |
if (fb->depth == 8) { |
radeon_crtc_fb_gamma_set(crtc, red, green, blue, regno); |
return 0; |
} |
if (regno < 16) { |
switch (fb->depth) { |
case 15: |
fb->pseudo_palette[regno] = ((red & 0xf800) >> 1) | |
((green & 0xf800) >> 6) | |
((blue & 0xf800) >> 11); |
break; |
case 16: |
fb->pseudo_palette[regno] = (red & 0xf800) | |
((green & 0xfc00) >> 5) | |
((blue & 0xf800) >> 11); |
break; |
case 24: |
case 32: |
fb->pseudo_palette[regno] = ((red & 0xff00) << 8) | |
(green & 0xff00) | |
((blue & 0xff00) >> 8); |
break; |
} |
} |
} |
return 0; |
} |
static int radeonfb_check_var(struct fb_var_screeninfo *var, |
struct fb_info *info) |
{ |
struct radeon_fb_device *rfbdev = info->par; |
struct radeon_framebuffer *rfb = rfbdev->rfb; |
struct drm_framebuffer *fb = &rfb->base; |
int depth; |
if (var->pixclock == -1 || !var->pixclock) { |
return -EINVAL; |
} |
/* Need to resize the fb object !!! */ |
if (var->xres > fb->width || var->yres > fb->height) { |
DRM_ERROR("Requested width/height is greater than current fb " |
"object %dx%d > %dx%d\n", var->xres, var->yres, |
fb->width, fb->height); |
DRM_ERROR("Need resizing code.\n"); |
return -EINVAL; |
} |
switch (var->bits_per_pixel) { |
case 16: |
depth = (var->green.length == 6) ? 16 : 15; |
break; |
case 32: |
depth = (var->transp.length > 0) ? 32 : 24; |
break; |
default: |
depth = var->bits_per_pixel; |
break; |
} |
switch (depth) { |
case 8: |
var->red.offset = 0; |
var->green.offset = 0; |
var->blue.offset = 0; |
var->red.length = 8; |
var->green.length = 8; |
var->blue.length = 8; |
var->transp.length = 0; |
var->transp.offset = 0; |
break; |
case 15: |
var->red.offset = 10; |
var->green.offset = 5; |
var->blue.offset = 0; |
var->red.length = 5; |
var->green.length = 5; |
var->blue.length = 5; |
var->transp.length = 1; |
var->transp.offset = 15; |
break; |
case 16: |
var->red.offset = 11; |
var->green.offset = 5; |
var->blue.offset = 0; |
var->red.length = 5; |
var->green.length = 6; |
var->blue.length = 5; |
var->transp.length = 0; |
var->transp.offset = 0; |
break; |
case 24: |
var->red.offset = 16; |
var->green.offset = 8; |
var->blue.offset = 0; |
var->red.length = 8; |
var->green.length = 8; |
var->blue.length = 8; |
var->transp.length = 0; |
var->transp.offset = 0; |
break; |
case 32: |
var->red.offset = 16; |
var->green.offset = 8; |
var->blue.offset = 0; |
var->red.length = 8; |
var->green.length = 8; |
var->blue.length = 8; |
var->transp.length = 8; |
var->transp.offset = 24; |
break; |
default: |
return -EINVAL; |
} |
return 0; |
} |
#endif |
/* this will let fbcon do the mode init */ |
static int radeonfb_set_par(struct fb_info *info) |
{ |
struct radeon_fb_device *rfbdev = info->par; |
struct drm_device *dev = rfbdev->rdev->ddev; |
struct fb_var_screeninfo *var = &info->var; |
struct drm_crtc *crtc; |
int ret; |
int i; |
if (var->pixclock != -1) { |
DRM_ERROR("PIXEL CLCOK SET\n"); |
return -EINVAL; |
} |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
for (i = 0; i < rfbdev->crtc_count; i++) { |
if (crtc->base.id == rfbdev->crtc_ids[i]) { |
break; |
} |
} |
if (i == rfbdev->crtc_count) { |
continue; |
} |
if (crtc->fb == radeon_crtc->mode_set.fb) { |
// mutex_lock(&dev->mode_config.mutex); |
ret = crtc->funcs->set_config(&radeon_crtc->mode_set); |
// mutex_unlock(&dev->mode_config.mutex); |
if (ret) { |
return ret; |
} |
} |
} |
return 0; |
} |
#if 0 |
static int radeonfb_pan_display(struct fb_var_screeninfo *var, |
struct fb_info *info) |
{ |
struct radeon_fb_device *rfbdev = info->par; |
struct drm_device *dev = rfbdev->rdev->ddev; |
struct drm_mode_set *modeset; |
struct drm_crtc *crtc; |
struct radeon_crtc *radeon_crtc; |
int ret = 0; |
int i; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
for (i = 0; i < rfbdev->crtc_count; i++) { |
if (crtc->base.id == rfbdev->crtc_ids[i]) { |
break; |
} |
} |
if (i == rfbdev->crtc_count) { |
continue; |
} |
radeon_crtc = to_radeon_crtc(crtc); |
modeset = &radeon_crtc->mode_set; |
modeset->x = var->xoffset; |
modeset->y = var->yoffset; |
if (modeset->num_connectors) { |
mutex_lock(&dev->mode_config.mutex); |
ret = crtc->funcs->set_config(modeset); |
mutex_unlock(&dev->mode_config.mutex); |
if (!ret) { |
info->var.xoffset = var->xoffset; |
info->var.yoffset = var->yoffset; |
} |
} |
} |
return ret; |
} |
static void radeonfb_on(struct fb_info *info) |
{ |
struct radeon_fb_device *rfbdev = info->par; |
struct drm_device *dev = rfbdev->rdev->ddev; |
struct drm_crtc *crtc; |
struct drm_encoder *encoder; |
int i; |
/* |
* For each CRTC in this fb, find all associated encoders |
* and turn them off, then turn off the CRTC. |
*/ |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
for (i = 0; i < rfbdev->crtc_count; i++) { |
if (crtc->base.id == rfbdev->crtc_ids[i]) { |
break; |
} |
} |
mutex_lock(&dev->mode_config.mutex); |
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); |
mutex_unlock(&dev->mode_config.mutex); |
/* Found a CRTC on this fb, now find encoders */ |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
if (encoder->crtc == crtc) { |
struct drm_encoder_helper_funcs *encoder_funcs; |
encoder_funcs = encoder->helper_private; |
mutex_lock(&dev->mode_config.mutex); |
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
mutex_unlock(&dev->mode_config.mutex); |
} |
} |
} |
} |
static void radeonfb_off(struct fb_info *info, int dpms_mode) |
{ |
struct radeon_fb_device *rfbdev = info->par; |
struct drm_device *dev = rfbdev->rdev->ddev; |
struct drm_crtc *crtc; |
struct drm_encoder *encoder; |
int i; |
/* |
* For each CRTC in this fb, find all associated encoders |
* and turn them off, then turn off the CRTC. |
*/ |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
for (i = 0; i < rfbdev->crtc_count; i++) { |
if (crtc->base.id == rfbdev->crtc_ids[i]) { |
break; |
} |
} |
/* Found a CRTC on this fb, now find encoders */ |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
if (encoder->crtc == crtc) { |
struct drm_encoder_helper_funcs *encoder_funcs; |
encoder_funcs = encoder->helper_private; |
mutex_lock(&dev->mode_config.mutex); |
encoder_funcs->dpms(encoder, dpms_mode); |
mutex_unlock(&dev->mode_config.mutex); |
} |
} |
if (dpms_mode == DRM_MODE_DPMS_OFF) { |
mutex_lock(&dev->mode_config.mutex); |
crtc_funcs->dpms(crtc, dpms_mode); |
mutex_unlock(&dev->mode_config.mutex); |
} |
} |
} |
int radeonfb_blank(int blank, struct fb_info *info) |
{ |
switch (blank) { |
case FB_BLANK_UNBLANK: |
radeonfb_on(info); |
break; |
case FB_BLANK_NORMAL: |
radeonfb_off(info, DRM_MODE_DPMS_STANDBY); |
break; |
case FB_BLANK_HSYNC_SUSPEND: |
radeonfb_off(info, DRM_MODE_DPMS_STANDBY); |
break; |
case FB_BLANK_VSYNC_SUSPEND: |
radeonfb_off(info, DRM_MODE_DPMS_SUSPEND); |
break; |
case FB_BLANK_POWERDOWN: |
radeonfb_off(info, DRM_MODE_DPMS_OFF); |
break; |
} |
return 0; |
} |
static struct fb_ops radeonfb_ops = { |
// .owner = THIS_MODULE, |
.fb_check_var = drm_fb_helper_check_var, |
.fb_set_par = drm_fb_helper_set_par, |
.fb_setcolreg = drm_fb_helper_setcolreg, |
// .fb_fillrect = cfb_fillrect, |
// .fb_copyarea = cfb_copyarea, |
// .fb_imageblit = cfb_imageblit, |
// .fb_pan_display = drm_fb_helper_pan_display, |
.fb_blank = drm_fb_helper_blank, |
.owner = THIS_MODULE, |
.fb_check_var = radeonfb_check_var, |
.fb_set_par = radeonfb_set_par, |
.fb_setcolreg = radeonfb_setcolreg, |
.fb_fillrect = cfb_fillrect, |
.fb_copyarea = cfb_copyarea, |
.fb_imageblit = cfb_imageblit, |
.fb_pan_display = radeonfb_pan_display, |
.fb_blank = radeonfb_blank, |
}; |
/** |
102,10 → 640,26 |
} |
EXPORT_SYMBOL(radeonfb_resize); |
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) |
/* Mode set saved at probe time so the panic handler can restore it.
 * NOTE(review): nothing in this chunk assigns panic_mode — the
 * assignment appears commented out in radeonfb_single_fb_probe(); as
 * written the handler restores a zeroed mode set.  Confirm intent. */
static struct drm_mode_set panic_mode;

/*
 * Panic-notifier callback: switch the display back to the saved console
 * configuration so panic output is visible.  The n/ununsed/panic_str
 * notifier arguments are ignored.  Always returns 0.
 */
int radeonfb_panic(struct notifier_block *n, unsigned long ununsed,
		   void *panic_str)
{
	DRM_ERROR("panic occurred, switching back to text console\n");
	drm_crtc_helper_set_config(&panic_mode);
	return 0;
}
EXPORT_SYMBOL(radeonfb_panic);

/* Notifier block for the kernel panic chain (registration is currently
 * commented out in radeonfb_single_fb_probe()). */
static struct notifier_block paniced = {
	.notifier_call = radeonfb_panic,
};
#endif |
static int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp) |
{ |
int aligned = width; |
int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; |
int align_large = (ASIC_IS_AVIVO(rdev)); |
int pitch_mask = 0; |
switch (bpp / 8) { |
126,16 → 680,11 |
return aligned; |
} |
/* DRM fb-helper hooks: only gamma programming is routed to the driver. */
static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
	.gamma_set = radeon_crtc_fb_gamma_set,
};
int radeonfb_create(struct drm_device *dev, |
int radeonfb_create(struct radeon_device *rdev, |
uint32_t fb_width, uint32_t fb_height, |
uint32_t surface_width, uint32_t surface_height, |
struct drm_framebuffer **fb_p) |
struct radeon_framebuffer **rfb_p) |
{ |
struct radeon_device *rdev = dev->dev_private; |
struct fb_info *info; |
struct radeon_fb_device *rfbdev; |
struct drm_framebuffer *fb = NULL; |
148,15 → 697,15 |
u64 fb_gpuaddr; |
void *fbptr = NULL; |
unsigned long tmp; |
bool fb_tiled = false; /* useful for testing */ |
u32 tiling_flags = 0; |
ENTRY(); |
mode_cmd.width = surface_width; |
mode_cmd.height = surface_height; |
mode_cmd.bpp = 32; |
/* need to align pitch with crtc limits */ |
mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); |
mode_cmd.depth = 24; |
mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp) * ((mode_cmd.bpp + 1) / 8); |
mode_cmd.depth = 32; |
size = mode_cmd.pitch * mode_cmd.height; |
aligned_size = ALIGN(size, PAGE_SIZE); |
166,6 → 715,7 |
false, 0, |
false, &gobj); |
if (ret) { |
printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n", |
surface_width, surface_height); |
174,7 → 724,7 |
} |
robj = gobj->driver_private; |
mutex_lock(&rdev->ddev->struct_mutex); |
// mutex_lock(&rdev->ddev->struct_mutex); |
fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj); |
if (fb == NULL) { |
DRM_ERROR("failed to allocate fb.\n"); |
190,8 → 740,8 |
list_add(&fb->filp_head, &rdev->ddev->mode_config.fb_kernel_list); |
*fb_p = fb; |
rfb = to_radeon_framebuffer(fb); |
*rfb_p = rfb; |
rdev->fbdev_rfb = rfb; |
rdev->fbdev_robj = robj; |
200,15 → 750,7 |
ret = -ENOMEM; |
goto out_unref; |
} |
rdev->fbdev_info = info; |
rfbdev = info->par; |
rfbdev->helper.funcs = &radeon_fb_helper_funcs; |
rfbdev->helper.dev = dev; |
ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, 2, |
RADEONFB_CONN_LIMIT); |
if (ret) |
goto out_unref; |
// ret = radeon_object_kmap(robj, &fbptr); |
// if (ret) { |
215,28 → 757,37 |
// goto out_unref; |
// } |
fbptr = (void*)0xFE000000; // LFB_BASE |
strcpy(info->fix.id, "radeondrmfb"); |
drm_fb_helper_fill_fix(info, fb->pitch); |
info->flags = FBINFO_DEFAULT; |
info->fbops = &radeonfb_ops; |
info->fix.type = FB_TYPE_PACKED_PIXELS; |
info->fix.visual = FB_VISUAL_TRUECOLOR; |
info->fix.type_aux = 0; |
info->fix.xpanstep = 1; /* doing it in hw */ |
info->fix.ypanstep = 1; /* doing it in hw */ |
info->fix.ywrapstep = 0; |
// info->fix.accel = FB_ACCEL_NONE; |
info->fix.type_aux = 0; |
// info->flags = FBINFO_DEFAULT; |
// info->fbops = &radeonfb_ops; |
info->fix.line_length = fb->pitch; |
tmp = fb_gpuaddr - rdev->mc.vram_location; |
info->fix.smem_start = rdev->mc.aper_base + tmp; |
info->fix.smem_len = size; |
info->screen_base = fbptr; |
info->screen_size = size; |
drm_fb_helper_fill_var(info, fb, fb_width, fb_height); |
/* setup aperture base/size for vesafb takeover */ |
info->aperture_base = rdev->ddev->mode_config.fb_base; |
info->aperture_size = rdev->mc.real_vram_size; |
info->pseudo_palette = fb->pseudo_palette; |
info->var.xres_virtual = fb->width; |
info->var.yres_virtual = fb->height; |
info->var.bits_per_pixel = fb->bits_per_pixel; |
info->var.xoffset = 0; |
info->var.yoffset = 0; |
// info->var.activate = FB_ACTIVATE_NOW; |
info->var.height = -1; |
info->var.width = -1; |
info->var.xres = fb_width; |
info->var.yres = fb_height; |
info->fix.mmio_start = 0; |
info->fix.mmio_len = 0; |
// info->pixmap.size = 64*1024; |
254,6 → 805,60 |
DRM_INFO("fb depth is %d\n", fb->depth); |
DRM_INFO(" pitch is %d\n", fb->pitch); |
switch (fb->depth) { |
case 8: |
info->var.red.offset = 0; |
info->var.green.offset = 0; |
info->var.blue.offset = 0; |
info->var.red.length = 8; /* 8bit DAC */ |
info->var.green.length = 8; |
info->var.blue.length = 8; |
info->var.transp.offset = 0; |
info->var.transp.length = 0; |
break; |
case 15: |
info->var.red.offset = 10; |
info->var.green.offset = 5; |
info->var.blue.offset = 0; |
info->var.red.length = 5; |
info->var.green.length = 5; |
info->var.blue.length = 5; |
info->var.transp.offset = 15; |
info->var.transp.length = 1; |
break; |
case 16: |
info->var.red.offset = 11; |
info->var.green.offset = 5; |
info->var.blue.offset = 0; |
info->var.red.length = 5; |
info->var.green.length = 6; |
info->var.blue.length = 5; |
info->var.transp.offset = 0; |
break; |
case 24: |
info->var.red.offset = 16; |
info->var.green.offset = 8; |
info->var.blue.offset = 0; |
info->var.red.length = 8; |
info->var.green.length = 8; |
info->var.blue.length = 8; |
info->var.transp.offset = 0; |
info->var.transp.length = 0; |
break; |
case 32: |
info->var.red.offset = 16; |
info->var.green.offset = 8; |
info->var.blue.offset = 0; |
info->var.red.length = 8; |
info->var.green.length = 8; |
info->var.blue.length = 8; |
info->var.transp.offset = 24; |
info->var.transp.length = 8; |
break; |
default: |
break; |
} |
dbgprintf("fb = %x\n", fb); |
fb->fbdev = info; |
260,7 → 865,7 |
rfbdev->rfb = rfb; |
rfbdev->rdev = rdev; |
mutex_unlock(&rdev->ddev->struct_mutex); |
// mutex_unlock(&rdev->ddev->struct_mutex); |
return 0; |
out_unref: |
274,15 → 879,156 |
kfree(fb); |
} |
// drm_gem_object_unreference(gobj); |
mutex_unlock(&rdev->ddev->struct_mutex); |
// mutex_unlock(&rdev->ddev->struct_mutex); |
out: |
return ret; |
} |
static int radeonfb_single_fb_probe(struct radeon_device *rdev) |
{ |
struct drm_crtc *crtc; |
struct drm_connector *connector; |
unsigned int fb_width = (unsigned)-1, fb_height = (unsigned)-1; |
unsigned int surface_width = 0, surface_height = 0; |
int new_fb = 0; |
int crtc_count = 0; |
int ret, i, conn_count = 0; |
struct radeon_framebuffer *rfb; |
struct fb_info *info; |
struct radeon_fb_device *rfbdev; |
struct drm_mode_set *modeset = NULL; |
ENTRY(); |
/* first up get a count of crtcs now in use and new min/maxes width/heights */ |
list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) { |
if (drm_helper_crtc_in_use(crtc)) { |
if (crtc->desired_mode) { |
if (crtc->desired_mode->hdisplay < fb_width) |
fb_width = crtc->desired_mode->hdisplay; |
if (crtc->desired_mode->vdisplay < fb_height) |
fb_height = crtc->desired_mode->vdisplay; |
if (crtc->desired_mode->hdisplay > surface_width) |
surface_width = crtc->desired_mode->hdisplay; |
if (crtc->desired_mode->vdisplay > surface_height) |
surface_height = crtc->desired_mode->vdisplay; |
} |
crtc_count++; |
} |
} |
if (crtc_count == 0 || fb_width == -1 || fb_height == -1) { |
/* hmm everyone went away - assume VGA cable just fell out |
and will come back later. */ |
dbgprintf("crtc count %x width %x height %x\n", |
crtc_count, fb_width, fb_height); |
return 0; |
} |
/* do we have an fb already? */ |
if (list_empty(&rdev->ddev->mode_config.fb_kernel_list)) { |
/* create an fb if we don't have one */ |
ret = radeonfb_create(rdev, fb_width, fb_height, surface_width, surface_height, &rfb); |
if (ret) { |
return -EINVAL; |
} |
new_fb = 1; |
} else { |
struct drm_framebuffer *fb; |
fb = list_first_entry(&rdev->ddev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head); |
rfb = to_radeon_framebuffer(fb); |
/* if someone hotplugs something bigger than we have already allocated, we are pwned. |
As really we can't resize an fbdev that is in the wild currently due to fbdev |
not really being designed for the lower layers moving stuff around under it. |
- so in the grand style of things - punt. */ |
if ((fb->width < surface_width) || (fb->height < surface_height)) { |
DRM_ERROR("Framebuffer not large enough to scale console onto.\n"); |
return -EINVAL; |
} |
} |
info = rfb->base.fbdev; |
rdev->fbdev_info = info; |
rfbdev = info->par; |
crtc_count = 0; |
/* okay we need to setup new connector sets in the crtcs */ |
list_for_each_entry(crtc, &rdev->ddev->mode_config.crtc_list, head) { |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
modeset = &radeon_crtc->mode_set; |
modeset->fb = &rfb->base; |
conn_count = 0; |
list_for_each_entry(connector, &rdev->ddev->mode_config.connector_list, head) { |
if (connector->encoder) |
if (connector->encoder->crtc == modeset->crtc) { |
modeset->connectors[conn_count] = connector; |
conn_count++; |
if (conn_count > RADEONFB_CONN_LIMIT) |
BUG(); |
} |
} |
for (i = conn_count; i < RADEONFB_CONN_LIMIT; i++) |
modeset->connectors[i] = NULL; |
rfbdev->crtc_ids[crtc_count++] = crtc->base.id; |
modeset->num_connectors = conn_count; |
if (modeset->crtc->desired_mode) { |
if (modeset->mode) { |
drm_mode_destroy(rdev->ddev, modeset->mode); |
} |
modeset->mode = drm_mode_duplicate(rdev->ddev, |
modeset->crtc->desired_mode); |
} |
} |
rfbdev->crtc_count = crtc_count; |
if (new_fb) { |
info->var.pixclock = -1; |
// if (register_framebuffer(info) < 0) |
// return -EINVAL; |
} else { |
radeonfb_set_par(info); |
} |
printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, |
info->fix.id); |
/* Switch back to kernel console on panic */ |
// panic_mode = *modeset; |
// atomic_notifier_chain_register(&panic_notifier_list, &paniced); |
// printk(KERN_INFO "registered panic notifier\n"); |
LEAVE(); |
return 0; |
} |
int radeonfb_probe(struct drm_device *dev) |
{ |
int ret; |
ret = drm_fb_helper_single_fb_probe(dev, &radeonfb_create); |
/* something has changed in the lower levels of hell - deal with it |
here */ |
/* two modes : a) 1 fb to rule all crtcs. |
b) one fb per crtc. |
two actions 1) new connected device |
2) device removed. |
case a/1 : if the fb surface isn't big enough - resize the surface fb. |
if the fb size isn't big enough - resize fb into surface. |
if everything big enough configure the new crtc/etc. |
case a/2 : undo the configuration |
possibly resize down the fb to fit the new configuration. |
case b/1 : see if it is on a new crtc - setup a new fb and add it. |
case b/2 : teardown the new fb. |
*/ |
ret = radeonfb_single_fb_probe(dev->dev_private); |
return ret; |
} |
EXPORT_SYMBOL(radeonfb_probe); |
298,7 → 1044,6 |
} |
info = fb->fbdev; |
if (info) { |
struct radeon_fb_device *rfbdev = info->par; |
robj = rfb->obj->driver_private; |
// unregister_framebuffer(info); |
// radeon_object_kunmap(robj); |
307,7 → 1052,8 |
} |
printk(KERN_INFO "unregistered panic notifier\n"); |
// atomic_notifier_chain_unregister(&panic_notifier_list, &paniced); |
// memset(&panic_mode, 0, sizeof(struct drm_mode_set)); |
return 0; |
} |
EXPORT_SYMBOL(radeonfb_remove); |
446,6 → 1192,8 |
bool ret; |
ENTRY(); |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) |
{ |
struct drm_display_mode *mode; |
496,17 → 1244,17 |
fb->width = width; |
fb->height = height; |
fb->pitch = radeon_align_pitch(dev->dev_private, width, 32, false) * ((32 + 1) / 8); |
fb->pitch = radeon_align_pitch(dev->dev_private, width, 32) |
* ((32 + 1) / 8); |
crtc->fb = fb; |
ret = drm_crtc_helper_set_mode(crtc, mode, 0, 0, fb); |
sysSetScreen(width, height, fb->pitch); |
sysSetScreen(width,height); |
if (ret == true) |
{ |
dbgprintf("new mode %d %d pitch %d\n", width, height, fb->pitch); |
} |
else |
{ |
/drivers/video/drm/radeon/radeon_gart.c |
---|
75,9 → 75,10 |
int radeon_gart_table_vram_alloc(struct radeon_device *rdev) |
{ |
uint32_t gpu_addr; |
int r; |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
if (rdev->gart.table.vram.robj == NULL) { |
r = radeon_object_create(rdev, NULL, |
89,14 → 90,6 |
return r; |
} |
} |
return 0; |
} |
int radeon_gart_table_vram_pin(struct radeon_device *rdev) |
{ |
uint64_t gpu_addr; |
int r; |
r = radeon_object_pin(rdev->gart.table.vram.robj, |
RADEON_GEM_DOMAIN_VRAM, &gpu_addr); |
if (r) { |
182,8 → 175,7 |
uint64_t page_base; |
int i, j; |
ENTER(); |
dbgprintf("%s ",__FUNCTION__); |
dbgprintf("offset %x pages %x list %x\n", |
offset, pages, pagelist); |
215,7 → 207,7 |
mb(); |
radeon_gart_tlb_flush(rdev); |
LEAVE(); |
dbgprintf("done %s\n",__FUNCTION__); |
return 0; |
} |
223,7 → 215,7 |
int radeon_gart_init(struct radeon_device *rdev) |
{ |
ENTER(); |
dbgprintf("%s\n",__FUNCTION__); |
if (rdev->gart.pages) { |
return 0; |
/drivers/video/drm/radeon/radeon_object.c |
---|
29,8 → 29,9 |
* Thomas Hellstrom <thomas-at-tungstengraphics-dot-com> |
* Dave Airlie |
*/ |
#include <linux/list.h> |
#include <drm/drmP.h> |
#include <list.h> |
#include <drmP.h> |
#include "radeon_drm.h" |
#include "radeon.h" |
#include <drm_mm.h> |
/drivers/video/drm/radeon/atom.h |
---|
25,7 → 25,7 |
#ifndef ATOM_H |
#define ATOM_H |
#include <linux/types.h> |
#include <types.h> |
#include "drmP.h" |
#define ATOM_BIOS_MAGIC 0xAA55 |
/drivers/video/drm/radeon/radeon_display.c |
---|
158,6 → 158,9 |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
if (radeon_crtc->mode_set.mode) { |
drm_mode_destroy(crtc->dev, radeon_crtc->mode_set.mode); |
} |
drm_crtc_cleanup(crtc); |
kfree(radeon_crtc); |
} |
164,7 → 167,7 |
static const struct drm_crtc_funcs radeon_crtc_funcs = { |
// .cursor_set = radeon_crtc_cursor_set, |
.cursor_move = radeon_crtc_cursor_move, |
// .cursor_move = radeon_crtc_cursor_move, |
.gamma_set = radeon_crtc_gamma_set, |
.set_config = drm_crtc_helper_set_config, |
.destroy = radeon_crtc_destroy, |
176,6 → 179,8 |
struct radeon_crtc *radeon_crtc; |
int i; |
ENTRY(); |
radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); |
if (radeon_crtc == NULL) |
return; |
184,13 → 189,10 |
drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); |
radeon_crtc->crtc_id = index; |
rdev->mode_info.crtcs[index] = radeon_crtc; |
#if 0 |
radeon_crtc->mode_set.crtc = &radeon_crtc->base; |
radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); |
radeon_crtc->mode_set.num_connectors = 0; |
#endif |
for (i = 0; i < 256; i++) { |
radeon_crtc->lut_r[i] = i << 2; |
202,6 → 204,8 |
radeon_atombios_init_crtc(dev, radeon_crtc); |
else |
radeon_legacy_init_crtc(dev, radeon_crtc); |
LEAVE(); |
} |
static const char *encoder_names[34] = { |
312,12 → 316,14 |
} |
} |
static bool radeon_setup_enc_conn(struct drm_device *dev) |
bool radeon_setup_enc_conn(struct drm_device *dev) |
{ |
struct radeon_device *rdev = dev->dev_private; |
struct drm_connector *drm_connector; |
bool ret = false; |
ENTRY(); |
if (rdev->bios) { |
if (rdev->is_atom_bios) { |
if (rdev->family >= CHIP_R600) |
335,6 → 341,7 |
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) |
radeon_ddc_dump(drm_connector); |
} |
LEAVE(); |
return ret; |
} |
346,13 → 353,9 |
if (!radeon_connector->ddc_bus) |
return -1; |
if (!radeon_connector->edid) { |
radeon_i2c_do_lock(radeon_connector, 1); |
edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter); |
radeon_i2c_do_lock(radeon_connector, 0); |
} else |
edid = radeon_connector->edid; |
if (edid) { |
/* update digital bits here */ |
if (edid->input & DRM_EDID_INPUT_DIGITAL) |
365,7 → 368,7 |
return ret; |
} |
drm_mode_connector_update_edid_property(&radeon_connector->base, NULL); |
return 0; |
return -1; |
} |
static int radeon_ddc_dump(struct drm_connector *connector) |
495,10 → 498,6 |
tmp += (uint64_t)pll->reference_freq * 1000 * frac_feedback_div; |
current_freq = radeon_div(tmp, ref_div * post_div); |
if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { |
error = freq - current_freq; |
error = error < 0 ? 0xffffffff : error; |
} else |
error = abs(current_freq - freq); |
vco_diff = abs(vco - best_vco); |
557,6 → 556,7 |
*post_div_p = best_post_div; |
} |
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb) |
{ |
struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); |
590,6 → 590,7 |
.create_handle = radeon_user_framebuffer_create_handle, |
}; |
struct drm_framebuffer * |
radeon_framebuffer_create(struct drm_device *dev, |
struct drm_mode_fb_cmd *mode_cmd, |
621,90 → 622,17 |
// return radeon_framebuffer_create(dev, mode_cmd, obj); |
} |
static const struct drm_mode_config_funcs radeon_mode_funcs = { |
// .fb_create = radeon_user_framebuffer_create, |
.fb_changed = radeonfb_probe, |
}; |
struct drm_prop_enum_list { |
int type; |
char *name; |
}; |
static struct drm_prop_enum_list radeon_tmds_pll_enum_list[] = |
{ { 0, "driver" }, |
{ 1, "bios" }, |
}; |
static struct drm_prop_enum_list radeon_tv_std_enum_list[] = |
{ { TV_STD_NTSC, "ntsc" }, |
{ TV_STD_PAL, "pal" }, |
{ TV_STD_PAL_M, "pal-m" }, |
{ TV_STD_PAL_60, "pal-60" }, |
{ TV_STD_NTSC_J, "ntsc-j" }, |
{ TV_STD_SCART_PAL, "scart-pal" }, |
{ TV_STD_PAL_CN, "pal-cn" }, |
{ TV_STD_SECAM, "secam" }, |
}; |
int radeon_modeset_create_props(struct radeon_device *rdev) |
int radeon_modeset_init(struct radeon_device *rdev) |
{ |
int i, sz; |
if (rdev->is_atom_bios) { |
rdev->mode_info.coherent_mode_property = |
drm_property_create(rdev->ddev, |
DRM_MODE_PROP_RANGE, |
"coherent", 2); |
if (!rdev->mode_info.coherent_mode_property) |
return -ENOMEM; |
dbgprintf("%s\n",__FUNCTION__); |
rdev->mode_info.coherent_mode_property->values[0] = 0; |
rdev->mode_info.coherent_mode_property->values[0] = 1; |
} |
if (!ASIC_IS_AVIVO(rdev)) { |
sz = ARRAY_SIZE(radeon_tmds_pll_enum_list); |
rdev->mode_info.tmds_pll_property = |
drm_property_create(rdev->ddev, |
DRM_MODE_PROP_ENUM, |
"tmds_pll", sz); |
for (i = 0; i < sz; i++) { |
drm_property_add_enum(rdev->mode_info.tmds_pll_property, |
i, |
radeon_tmds_pll_enum_list[i].type, |
radeon_tmds_pll_enum_list[i].name); |
} |
} |
rdev->mode_info.load_detect_property = |
drm_property_create(rdev->ddev, |
DRM_MODE_PROP_RANGE, |
"load detection", 2); |
if (!rdev->mode_info.load_detect_property) |
return -ENOMEM; |
rdev->mode_info.load_detect_property->values[0] = 0; |
rdev->mode_info.load_detect_property->values[0] = 1; |
drm_mode_create_scaling_mode_property(rdev->ddev); |
sz = ARRAY_SIZE(radeon_tv_std_enum_list); |
rdev->mode_info.tv_std_property = |
drm_property_create(rdev->ddev, |
DRM_MODE_PROP_ENUM, |
"tv standard", sz); |
for (i = 0; i < sz; i++) { |
drm_property_add_enum(rdev->mode_info.tv_std_property, |
i, |
radeon_tv_std_enum_list[i].type, |
radeon_tv_std_enum_list[i].name); |
} |
return 0; |
} |
int radeon_modeset_init(struct radeon_device *rdev) |
{ |
int num_crtc = 2, i; |
int ret; |
723,10 → 651,6 |
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; |
ret = radeon_modeset_create_props(rdev); |
if (ret) { |
return ret; |
} |
/* allocate crtcs - TODO single crtc */ |
for (i = 0; i < num_crtc; i++) { |
radeon_crtc_init(rdev->ddev, i); |
738,6 → 662,9 |
return ret; |
} |
drm_helper_initial_config(rdev->ddev); |
dbgprintf("done %s\n",__FUNCTION__); |
return 0; |
} |
749,50 → 676,36 |
} |
} |
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
void radeon_init_disp_bandwidth(struct drm_device *dev) |
{ |
struct drm_device *dev = crtc->dev; |
struct drm_encoder *encoder; |
struct radeon_device *rdev = dev->dev_private; |
struct drm_display_mode *modes[2]; |
int pixel_bytes[2]; |
struct drm_crtc *crtc; |
pixel_bytes[0] = pixel_bytes[1] = 0; |
modes[0] = modes[1] = NULL; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
struct radeon_encoder *radeon_encoder; |
bool first = true; |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
radeon_encoder = to_radeon_encoder(encoder); |
if (encoder->crtc != crtc) |
continue; |
if (first) { |
radeon_crtc->rmx_type = radeon_encoder->rmx_type; |
memcpy(&radeon_crtc->native_mode, |
&radeon_encoder->native_mode, |
sizeof(struct radeon_native_mode)); |
first = false; |
} else { |
if (radeon_crtc->rmx_type != radeon_encoder->rmx_type) { |
/* WARNING: Right now this can't happen but |
* in the future we need to check that scaling |
* are consistent accross different encoder |
* (ie all encoder can work with the same |
* scaling). |
*/ |
DRM_ERROR("Scaling not consistent accross encoder.\n"); |
return false; |
if (crtc->enabled && crtc->fb) { |
modes[radeon_crtc->crtc_id] = &crtc->mode; |
pixel_bytes[radeon_crtc->crtc_id] = crtc->fb->bits_per_pixel / 8; |
} |
} |
} |
if (radeon_crtc->rmx_type != RMX_OFF) { |
fixed20_12 a, b; |
a.full = rfixed_const(crtc->mode.vdisplay); |
b.full = rfixed_const(radeon_crtc->native_mode.panel_xres); |
radeon_crtc->vsc.full = rfixed_div(a, b); |
a.full = rfixed_const(crtc->mode.hdisplay); |
b.full = rfixed_const(radeon_crtc->native_mode.panel_yres); |
radeon_crtc->hsc.full = rfixed_div(a, b); |
if (ASIC_IS_AVIVO(rdev)) { |
radeon_init_disp_bw_avivo(dev, |
modes[0], |
pixel_bytes[0], |
modes[1], |
pixel_bytes[1]); |
} else { |
radeon_crtc->vsc.full = rfixed_const(1); |
radeon_crtc->hsc.full = rfixed_const(1); |
radeon_init_disp_bw_legacy(dev, |
modes[0], |
pixel_bytes[0], |
modes[1], |
pixel_bytes[1]); |
} |
return true; |
} |
/drivers/video/drm/radeon/radeon_encoders.c |
---|
126,23 → 126,6 |
} |
} |
void radeon_encoder_set_active_device(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_connector *connector; |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
if (connector->encoder == encoder) { |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
radeon_encoder->active_device = radeon_encoder->devices & radeon_connector->devices; |
DRM_DEBUG("setting active device to %08x from %08x %08x for encoder %d\n", |
radeon_encoder->active_device, radeon_encoder->devices, |
radeon_connector->devices, encoder->encoder_type); |
} |
} |
} |
static struct drm_connector * |
radeon_get_connector_for_encoder(struct drm_encoder *encoder) |
{ |
171,6 → 154,7 |
if (mode->hdisplay < native_mode->panel_xres || |
mode->vdisplay < native_mode->panel_yres) { |
radeon_encoder->flags |= RADEON_USE_RMX; |
if (ASIC_IS_AVIVO(rdev)) { |
adjusted_mode->hdisplay = native_mode->panel_xres; |
adjusted_mode->vdisplay = native_mode->panel_yres; |
217,8 → 201,11 |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
radeon_encoder->flags &= ~RADEON_USE_RMX; |
drm_mode_set_crtcinfo(adjusted_mode, 0); |
if (radeon_encoder->rmx_type != RMX_OFF) |
240,12 → 227,9 |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
DAC_ENCODER_CONTROL_PS_ALLOCATION args; |
int index = 0, num = 0; |
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; |
/* fixme - fill in enc_priv for atom dac */ |
enum radeon_tv_std tv_std = TV_STD_NTSC; |
if (dac_info->tv_std) |
tv_std = dac_info->tv_std; |
memset(&args, 0, sizeof(args)); |
switch (radeon_encoder->encoder_id) { |
263,9 → 247,9 |
args.ucAction = action; |
if (radeon_encoder->active_device & (ATOM_DEVICE_CRT_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) |
args.ucDacStandard = ATOM_DAC1_PS2; |
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
args.ucDacStandard = ATOM_DAC1_CV; |
else { |
switch (tv_std) { |
298,12 → 282,9 |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
TV_ENCODER_CONTROL_PS_ALLOCATION args; |
int index = 0; |
struct radeon_encoder_atom_dac *dac_info = radeon_encoder->enc_priv; |
/* fixme - fill in enc_priv for atom dac */ |
enum radeon_tv_std tv_std = TV_STD_NTSC; |
if (dac_info->tv_std) |
tv_std = dac_info->tv_std; |
memset(&args, 0, sizeof(args)); |
index = GetIndexIntoMasterTable(COMMAND, TVEncoderControl); |
310,7 → 291,7 |
args.sTVEncoder.ucAction = action; |
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
args.sTVEncoder.ucTvStandard = ATOM_TV_CV; |
else { |
switch (tv_std) { |
542,7 → 523,6 |
switch (connector->connector_type) { |
case DRM_MODE_CONNECTOR_DVII: |
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */ |
if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) |
return ATOM_ENCODER_MODE_HDMI; |
else if (radeon_connector->use_digital) |
552,6 → 532,7 |
break; |
case DRM_MODE_CONNECTOR_DVID: |
case DRM_MODE_CONNECTOR_HDMIA: |
case DRM_MODE_CONNECTOR_HDMIB: |
default: |
if (drm_detect_hdmi_monitor((struct edid *)connector->edid_blob_ptr)) |
return ATOM_ENCODER_MODE_HDMI; |
827,6 → 808,234 |
} |
/*
 * Force-program the RV515 TV scaler with a fixed coefficient table.
 *
 * After a handful of direct control-register writes, the bulk of the
 * function streams value pairs through an index register (0x6578) and a
 * data register (0x657C): each WREG32(0x6578, idx) selects a table slot
 * and the following WREG32(0x657C, val) writes its coefficient.
 *
 * NOTE(review): the register offsets and coefficient values are opaque
 * magic from the original driver; the grouping comments below reflect
 * only the visible index ranges (0x0xxxx / 0x1xxxx / 0x2xxxx / 0x3xxxx),
 * not documented hardware semantics — confirm against AMD register docs.
 */
static void atom_rv515_force_tv_scaler(struct radeon_device *rdev)
{
	/* scaler control registers */
	WREG32(0x659C, 0x0);
	WREG32(0x6594, 0x705);
	WREG32(0x65A4, 0x10001);
	WREG32(0x65D8, 0x0);
	WREG32(0x65B0, 0x0);
	WREG32(0x65C0, 0x0);
	WREG32(0x65D4, 0x0);
	/* coefficient table, indices 0x0 - 0x802 */
	WREG32(0x6578, 0x0);
	WREG32(0x657C, 0x841880A8);
	WREG32(0x6578, 0x1);
	WREG32(0x657C, 0x84208680);
	WREG32(0x6578, 0x2);
	WREG32(0x657C, 0xBFF880B0);
	WREG32(0x6578, 0x100);
	WREG32(0x657C, 0x83D88088);
	WREG32(0x6578, 0x101);
	WREG32(0x657C, 0x84608680);
	WREG32(0x6578, 0x102);
	WREG32(0x657C, 0xBFF080D0);
	WREG32(0x6578, 0x200);
	WREG32(0x657C, 0x83988068);
	WREG32(0x6578, 0x201);
	WREG32(0x657C, 0x84A08680);
	WREG32(0x6578, 0x202);
	WREG32(0x657C, 0xBFF080F8);
	WREG32(0x6578, 0x300);
	WREG32(0x657C, 0x83588058);
	WREG32(0x6578, 0x301);
	WREG32(0x657C, 0x84E08660);
	WREG32(0x6578, 0x302);
	WREG32(0x657C, 0xBFF88120);
	WREG32(0x6578, 0x400);
	WREG32(0x657C, 0x83188040);
	WREG32(0x6578, 0x401);
	WREG32(0x657C, 0x85008660);
	WREG32(0x6578, 0x402);
	WREG32(0x657C, 0xBFF88150);
	WREG32(0x6578, 0x500);
	WREG32(0x657C, 0x82D88030);
	WREG32(0x6578, 0x501);
	WREG32(0x657C, 0x85408640);
	WREG32(0x6578, 0x502);
	WREG32(0x657C, 0xBFF88180);
	WREG32(0x6578, 0x600);
	WREG32(0x657C, 0x82A08018);
	WREG32(0x6578, 0x601);
	WREG32(0x657C, 0x85808620);
	WREG32(0x6578, 0x602);
	WREG32(0x657C, 0xBFF081B8);
	WREG32(0x6578, 0x700);
	WREG32(0x657C, 0x82608010);
	WREG32(0x6578, 0x701);
	WREG32(0x657C, 0x85A08600);
	WREG32(0x6578, 0x702);
	WREG32(0x657C, 0x800081F0);
	WREG32(0x6578, 0x800);
	WREG32(0x657C, 0x8228BFF8);
	WREG32(0x6578, 0x801);
	WREG32(0x657C, 0x85E085E0);
	WREG32(0x6578, 0x802);
	WREG32(0x657C, 0xBFF88228);
	/* coefficient table, indices 0x10000 - 0x10802 */
	WREG32(0x6578, 0x10000);
	WREG32(0x657C, 0x82A8BF00);
	WREG32(0x6578, 0x10001);
	WREG32(0x657C, 0x82A08CC0);
	WREG32(0x6578, 0x10002);
	WREG32(0x657C, 0x8008BEF8);
	WREG32(0x6578, 0x10100);
	WREG32(0x657C, 0x81F0BF28);
	WREG32(0x6578, 0x10101);
	WREG32(0x657C, 0x83608CA0);
	WREG32(0x6578, 0x10102);
	WREG32(0x657C, 0x8018BED0);
	WREG32(0x6578, 0x10200);
	WREG32(0x657C, 0x8148BF38);
	WREG32(0x6578, 0x10201);
	WREG32(0x657C, 0x84408C80);
	WREG32(0x6578, 0x10202);
	WREG32(0x657C, 0x8008BEB8);
	WREG32(0x6578, 0x10300);
	WREG32(0x657C, 0x80B0BF78);
	WREG32(0x6578, 0x10301);
	WREG32(0x657C, 0x85008C20);
	WREG32(0x6578, 0x10302);
	WREG32(0x657C, 0x8020BEA0);
	WREG32(0x6578, 0x10400);
	WREG32(0x657C, 0x8028BF90);
	WREG32(0x6578, 0x10401);
	WREG32(0x657C, 0x85E08BC0);
	WREG32(0x6578, 0x10402);
	WREG32(0x657C, 0x8018BE90);
	WREG32(0x6578, 0x10500);
	WREG32(0x657C, 0xBFB8BFB0);
	WREG32(0x6578, 0x10501);
	WREG32(0x657C, 0x86C08B40);
	WREG32(0x6578, 0x10502);
	WREG32(0x657C, 0x8010BE90);
	WREG32(0x6578, 0x10600);
	WREG32(0x657C, 0xBF58BFC8);
	WREG32(0x6578, 0x10601);
	WREG32(0x657C, 0x87A08AA0);
	WREG32(0x6578, 0x10602);
	WREG32(0x657C, 0x8010BE98);
	WREG32(0x6578, 0x10700);
	WREG32(0x657C, 0xBF10BFF0);
	WREG32(0x6578, 0x10701);
	WREG32(0x657C, 0x886089E0);
	WREG32(0x6578, 0x10702);
	WREG32(0x657C, 0x8018BEB0);
	WREG32(0x6578, 0x10800);
	WREG32(0x657C, 0xBED8BFE8);
	WREG32(0x6578, 0x10801);
	WREG32(0x657C, 0x89408940);
	WREG32(0x6578, 0x10802);
	WREG32(0x657C, 0xBFE8BED8);
	/* coefficient table, indices 0x20000 - 0x20803 */
	WREG32(0x6578, 0x20000);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20001);
	WREG32(0x657C, 0x90008000);
	WREG32(0x6578, 0x20002);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20003);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20100);
	WREG32(0x657C, 0x80108000);
	WREG32(0x6578, 0x20101);
	WREG32(0x657C, 0x8FE0BF70);
	WREG32(0x6578, 0x20102);
	WREG32(0x657C, 0xBFE880C0);
	WREG32(0x6578, 0x20103);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20200);
	WREG32(0x657C, 0x8018BFF8);
	WREG32(0x6578, 0x20201);
	WREG32(0x657C, 0x8F80BF08);
	WREG32(0x6578, 0x20202);
	WREG32(0x657C, 0xBFD081A0);
	WREG32(0x6578, 0x20203);
	WREG32(0x657C, 0xBFF88000);
	WREG32(0x6578, 0x20300);
	WREG32(0x657C, 0x80188000);
	WREG32(0x6578, 0x20301);
	WREG32(0x657C, 0x8EE0BEC0);
	WREG32(0x6578, 0x20302);
	WREG32(0x657C, 0xBFB082A0);
	WREG32(0x6578, 0x20303);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20400);
	WREG32(0x657C, 0x80188000);
	WREG32(0x6578, 0x20401);
	WREG32(0x657C, 0x8E00BEA0);
	WREG32(0x6578, 0x20402);
	WREG32(0x657C, 0xBF8883C0);
	WREG32(0x6578, 0x20403);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x20500);
	WREG32(0x657C, 0x80188000);
	WREG32(0x6578, 0x20501);
	WREG32(0x657C, 0x8D00BE90);
	WREG32(0x6578, 0x20502);
	WREG32(0x657C, 0xBF588500);
	WREG32(0x6578, 0x20503);
	WREG32(0x657C, 0x80008008);
	WREG32(0x6578, 0x20600);
	WREG32(0x657C, 0x80188000);
	WREG32(0x6578, 0x20601);
	WREG32(0x657C, 0x8BC0BE98);
	WREG32(0x6578, 0x20602);
	WREG32(0x657C, 0xBF308660);
	WREG32(0x6578, 0x20603);
	WREG32(0x657C, 0x80008008);
	WREG32(0x6578, 0x20700);
	WREG32(0x657C, 0x80108000);
	WREG32(0x6578, 0x20701);
	WREG32(0x657C, 0x8A80BEB0);
	WREG32(0x6578, 0x20702);
	WREG32(0x657C, 0xBF0087C0);
	WREG32(0x6578, 0x20703);
	WREG32(0x657C, 0x80008008);
	WREG32(0x6578, 0x20800);
	WREG32(0x657C, 0x80108000);
	WREG32(0x6578, 0x20801);
	WREG32(0x657C, 0x8920BED0);
	WREG32(0x6578, 0x20802);
	WREG32(0x657C, 0xBED08920);
	WREG32(0x6578, 0x20803);
	WREG32(0x657C, 0x80008010);
	/* coefficient table, indices 0x30000 - 0x30801 */
	WREG32(0x6578, 0x30000);
	WREG32(0x657C, 0x90008000);
	WREG32(0x6578, 0x30001);
	WREG32(0x657C, 0x80008000);
	WREG32(0x6578, 0x30100);
	WREG32(0x657C, 0x8FE0BF90);
	WREG32(0x6578, 0x30101);
	WREG32(0x657C, 0xBFF880A0);
	WREG32(0x6578, 0x30200);
	WREG32(0x657C, 0x8F60BF40);
	WREG32(0x6578, 0x30201);
	WREG32(0x657C, 0xBFE88180);
	WREG32(0x6578, 0x30300);
	WREG32(0x657C, 0x8EC0BF00);
	WREG32(0x6578, 0x30301);
	WREG32(0x657C, 0xBFC88280);
	WREG32(0x6578, 0x30400);
	WREG32(0x657C, 0x8DE0BEE0);
	WREG32(0x6578, 0x30401);
	WREG32(0x657C, 0xBFA083A0);
	WREG32(0x6578, 0x30500);
	WREG32(0x657C, 0x8CE0BED0);
	WREG32(0x6578, 0x30501);
	WREG32(0x657C, 0xBF7884E0);
	WREG32(0x6578, 0x30600);
	WREG32(0x657C, 0x8BA0BED8);
	WREG32(0x6578, 0x30601);
	WREG32(0x657C, 0xBF508640);
	WREG32(0x6578, 0x30700);
	WREG32(0x657C, 0x8A60BEE8);
	WREG32(0x6578, 0x30701);
	WREG32(0x657C, 0xBF2087A0);
	WREG32(0x6578, 0x30800);
	WREG32(0x657C, 0x8900BF00);
	WREG32(0x6578, 0x30801);
	WREG32(0x657C, 0xBF008900);
}
static void |
atombios_yuv_setup(struct drm_encoder *encoder, bool enable) |
{ |
847,10 → 1056,10 |
/* XXX: fix up scratch reg handling */ |
temp = RREG32(reg); |
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) |
WREG32(reg, (ATOM_S3_TV1_ACTIVE | |
(radeon_crtc->crtc_id << 18))); |
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
WREG32(reg, (ATOM_S3_CV_ACTIVE | (radeon_crtc->crtc_id << 24))); |
else |
WREG32(reg, 0); |
865,6 → 1074,129 |
} |
static void |
atombios_overscan_setup(struct drm_encoder *encoder, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
SET_CRTC_OVERSCAN_PS_ALLOCATION args; |
int index = GetIndexIntoMasterTable(COMMAND, SetCRTC_OverScan); |
memset(&args, 0, sizeof(args)); |
args.usOverscanRight = 0; |
args.usOverscanLeft = 0; |
args.usOverscanBottom = 0; |
args.usOverscanTop = 0; |
args.ucCRTC = radeon_crtc->crtc_id; |
if (radeon_encoder->flags & RADEON_USE_RMX) { |
if (radeon_encoder->rmx_type == RMX_FULL) { |
args.usOverscanRight = 0; |
args.usOverscanLeft = 0; |
args.usOverscanBottom = 0; |
args.usOverscanTop = 0; |
} else if (radeon_encoder->rmx_type == RMX_CENTER) { |
args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; |
args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; |
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; |
} else if (radeon_encoder->rmx_type == RMX_ASPECT) { |
int a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; |
int a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; |
if (a1 > a2) { |
args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; |
} else if (a2 > a1) { |
args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; |
args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; |
} |
} |
} |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
} |
static void |
atombios_scaler_setup(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
ENABLE_SCALER_PS_ALLOCATION args; |
int index = GetIndexIntoMasterTable(COMMAND, EnableScaler); |
/* fixme - fill in enc_priv for atom dac */ |
enum radeon_tv_std tv_std = TV_STD_NTSC; |
if (!ASIC_IS_AVIVO(rdev) && radeon_crtc->crtc_id) |
return; |
memset(&args, 0, sizeof(args)); |
args.ucScaler = radeon_crtc->crtc_id; |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) { |
switch (tv_std) { |
case TV_STD_NTSC: |
default: |
args.ucTVStandard = ATOM_TV_NTSC; |
break; |
case TV_STD_PAL: |
args.ucTVStandard = ATOM_TV_PAL; |
break; |
case TV_STD_PAL_M: |
args.ucTVStandard = ATOM_TV_PALM; |
break; |
case TV_STD_PAL_60: |
args.ucTVStandard = ATOM_TV_PAL60; |
break; |
case TV_STD_NTSC_J: |
args.ucTVStandard = ATOM_TV_NTSCJ; |
break; |
case TV_STD_SCART_PAL: |
args.ucTVStandard = ATOM_TV_PAL; /* ??? */ |
break; |
case TV_STD_SECAM: |
args.ucTVStandard = ATOM_TV_SECAM; |
break; |
case TV_STD_PAL_CN: |
args.ucTVStandard = ATOM_TV_PALCN; |
break; |
} |
args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; |
} else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) { |
args.ucTVStandard = ATOM_TV_CV; |
args.ucEnable = SCALER_ENABLE_MULTITAP_MODE; |
} else if (radeon_encoder->flags & RADEON_USE_RMX) { |
if (radeon_encoder->rmx_type == RMX_FULL) |
args.ucEnable = ATOM_SCALER_EXPANSION; |
else if (radeon_encoder->rmx_type == RMX_CENTER) |
args.ucEnable = ATOM_SCALER_CENTER; |
else if (radeon_encoder->rmx_type == RMX_ASPECT) |
args.ucEnable = ATOM_SCALER_EXPANSION; |
} else { |
if (ASIC_IS_AVIVO(rdev)) |
args.ucEnable = ATOM_SCALER_DISABLE; |
else |
args.ucEnable = ATOM_SCALER_CENTER; |
} |
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT) |
&& rdev->family >= CHIP_RV515 && rdev->family <= CHIP_RV570) { |
atom_rv515_force_tv_scaler(rdev); |
} |
} |
static void |
radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) |
{ |
struct drm_device *dev = encoder->dev; |
873,19 → 1205,9 |
DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION args; |
int index = 0; |
bool is_dig = false; |
int devices; |
memset(&args, 0, sizeof(args)); |
/* on DPMS off we have no idea if active device is meaningful */ |
if (mode != DRM_MODE_DPMS_ON && !radeon_encoder->active_device) |
devices = radeon_encoder->devices; |
else |
devices = radeon_encoder->active_device; |
DRM_DEBUG("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n", |
radeon_encoder->encoder_id, mode, radeon_encoder->devices, |
radeon_encoder->active_device); |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1: |
913,9 → 1235,9 |
break; |
case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
if (devices & (ATOM_DEVICE_TV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) |
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); |
else if (devices & (ATOM_DEVICE_CV_SUPPORT)) |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); |
else |
index = GetIndexIntoMasterTable(COMMAND, DAC1OutputControl); |
922,9 → 1244,9 |
break; |
case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
if (devices & (ATOM_DEVICE_TV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) |
index = GetIndexIntoMasterTable(COMMAND, TV1OutputControl); |
else if (devices & (ATOM_DEVICE_CV_SUPPORT)) |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
index = GetIndexIntoMasterTable(COMMAND, CV1OutputControl); |
else |
index = GetIndexIntoMasterTable(COMMAND, DAC2OutputControl); |
1011,9 → 1333,9 |
break; |
case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) |
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; |
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; |
else |
args.v1.ucDevice = ATOM_DEVICE_CRT1_INDEX; |
1020,9 → 1342,9 |
break; |
case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) |
args.v1.ucDevice = ATOM_DEVICE_TV1_INDEX; |
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
args.v1.ucDevice = ATOM_DEVICE_CV_INDEX; |
else |
args.v1.ucDevice = ATOM_DEVICE_CRT2_INDEX; |
1051,17 → 1373,17 |
args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID; |
break; |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) |
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; |
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; |
else |
args.v2.ucEncoderID = ASIC_INT_DAC1_ENCODER_ID; |
break; |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT)) |
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; |
else if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT)) |
else if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT)) |
args.v2.ucEncoderID = ASIC_INT_TV_ENCODER_ID; |
else |
args.v2.ucEncoderID = ASIC_INT_DAC2_ENCODER_ID; |
1126,10 → 1448,12 |
radeon_encoder->pixel_clock = adjusted_mode->clock; |
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
atombios_overscan_setup(encoder, mode, adjusted_mode); |
atombios_scaler_setup(encoder); |
atombios_set_encoder_crtc_source(encoder); |
if (ASIC_IS_AVIVO(rdev)) { |
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) |
atombios_yuv_setup(encoder, true); |
else |
atombios_yuv_setup(encoder, false); |
1167,7 → 1491,7 |
case ENCODER_OBJECT_ID_INTERNAL_DAC2: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
atombios_dac_setup(encoder, ATOM_ENABLE); |
if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) |
atombios_tv_setup(encoder, ATOM_ENABLE); |
break; |
} |
1175,12 → 1499,11 |
} |
static bool |
atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *connector) |
atombios_dac_load_detect(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
if (radeon_encoder->devices & (ATOM_DEVICE_TV_SUPPORT | |
ATOM_DEVICE_CV_SUPPORT | |
1201,15 → 1524,15 |
else |
args.sDacload.ucDacType = ATOM_DAC_B; |
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) |
if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) |
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT1_SUPPORT); |
else if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) |
else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) |
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CRT2_SUPPORT); |
else if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { |
else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) { |
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_CV_SUPPORT); |
if (crev >= 3) |
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; |
} else if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { |
} else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) { |
args.sDacload.usDeviceID = cpu_to_le16(ATOM_DEVICE_TV1_SUPPORT); |
if (crev >= 3) |
args.sDacload.ucMisc = DAC_LOAD_MISC_YPrPb; |
1228,10 → 1551,9 |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_connector *radeon_connector = to_radeon_connector(connector); |
uint32_t bios_0_scratch; |
if (!atombios_dac_load_detect(encoder, connector)) { |
if (!atombios_dac_load_detect(encoder)) { |
DRM_DEBUG("detect returned false \n"); |
return connector_status_unknown; |
} |
1241,20 → 1563,17 |
else |
bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); |
DRM_DEBUG("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices); |
if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) { |
DRM_DEBUG("Bios 0 scratch %x\n", bios_0_scratch); |
if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) { |
if (bios_0_scratch & ATOM_S0_CRT1_MASK) |
return connector_status_connected; |
} |
if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) { |
} else if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) { |
if (bios_0_scratch & ATOM_S0_CRT2_MASK) |
return connector_status_connected; |
} |
if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) { |
} else if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) { |
if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A)) |
return connector_status_connected; |
} |
if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) { |
} else if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) { |
if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A)) |
return connector_status_connected; /* CTV */ |
else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A)) |
1267,8 → 1586,6 |
{ |
radeon_atom_output_lock(encoder, true); |
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
radeon_encoder_set_active_device(encoder); |
} |
static void radeon_atom_encoder_commit(struct drm_encoder *encoder) |
1277,13 → 1594,6 |
radeon_atom_output_lock(encoder, false); |
} |
static void radeon_atom_encoder_disable(struct drm_encoder *encoder) |
{ |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); |
radeon_encoder->active_device = 0; |
} |
static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = { |
.dpms = radeon_atom_encoder_dpms, |
.mode_fixup = radeon_atom_mode_fixup, |
1290,7 → 1600,6 |
.prepare = radeon_atom_encoder_prepare, |
.mode_set = radeon_atom_encoder_mode_set, |
.commit = radeon_atom_encoder_commit, |
.disable = radeon_atom_encoder_disable, |
/* no detect for TMDS/LVDS yet */ |
}; |
1315,18 → 1624,6 |
.destroy = radeon_enc_destroy, |
}; |
struct radeon_encoder_atom_dac * |
radeon_atombios_set_dac_info(struct radeon_encoder *radeon_encoder) |
{ |
struct radeon_encoder_atom_dac *dac = kzalloc(sizeof(struct radeon_encoder_atom_dac), GFP_KERNEL); |
if (!dac) |
return NULL; |
dac->tv_std = TV_STD_NTSC; |
return dac; |
} |
struct radeon_encoder_atom_dig * |
radeon_atombios_set_dig_info(struct radeon_encoder *radeon_encoder) |
{ |
1370,7 → 1667,6 |
radeon_encoder->encoder_id = encoder_id; |
radeon_encoder->devices = supported_device; |
radeon_encoder->rmx_type = RMX_OFF; |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
1395,7 → 1691,6 |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: |
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TVDAC); |
radeon_encoder->enc_priv = radeon_atombios_set_dac_info(radeon_encoder); |
drm_encoder_helper_add(encoder, &radeon_atom_dac_helper_funcs); |
break; |
case ENCODER_OBJECT_ID_INTERNAL_DVO1: |
1405,14 → 1700,8 |
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: |
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
radeon_encoder->rmx_type = RMX_FULL; |
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_LVDS); |
radeon_encoder->enc_priv = radeon_atombios_get_lvds_info(radeon_encoder); |
} else { |
drm_encoder_init(dev, encoder, &radeon_atom_enc_funcs, DRM_MODE_ENCODER_TMDS); |
radeon_encoder->enc_priv = radeon_atombios_set_dig_info(radeon_encoder); |
} |
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs); |
break; |
} |
/drivers/video/drm/radeon/radeon_gem.c |
---|
182,9 → 182,9 |
struct radeon_device *rdev = dev->dev_private; |
struct drm_radeon_gem_info *args = data; |
args->vram_size = rdev->mc.real_vram_size; |
args->vram_size = rdev->mc.vram_size; |
/* FIXME: report somethings that makes sense */ |
args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024); |
args->vram_visible = rdev->mc.vram_size - (4 * 1024 * 1024); |
args->gart_size = rdev->mc.gtt_size; |
return 0; |
} |
/drivers/video/drm/radeon/atikms.lds |
---|
20,21 → 20,11 |
*(.data) |
} |
.bss ALIGN(__section_alignment__): |
.reloc ALIGN(__section_alignment__) : |
{ |
*(.bss) |
*(COMMON) |
*(.reloc) |
} |
/DISCARD/ : |
{ |
*(.debug$S) |
*(.debug$T) |
*(.debug$F) |
*(.drectve) |
*(.edata) |
} |
.idata ALIGN(__section_alignment__): |
{ |
SORT(*)(.idata$2) |
47,10 → 37,19 |
SORT(*)(.idata$7) |
} |
.reloc ALIGN(__section_alignment__) : |
.bss ALIGN(__section_alignment__): |
{ |
*(.reloc) |
*(.bss) |
*(COMMON) |
} |
/DISCARD/ : |
{ |
*(.debug$S) |
*(.debug$T) |
*(.debug$F) |
*(.drectve) |
*(.edata) |
} |
} |
/drivers/video/drm/radeon/radeon_fence.c |
---|
53,9 → 53,9 |
* away |
*/ |
WREG32(rdev->fence_drv.scratch_reg, fence->seq); |
} else |
} else { |
radeon_fence_ring_emit(rdev, fence); |
} |
fence->emited = true; |
fence->timeout = jiffies + ((2000 * HZ) / 1000); |
list_del(&fence->list); |
168,40 → 168,9 |
return signaled; |
} |
int r600_fence_wait(struct radeon_fence *fence, bool intr, bool lazy) |
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible) |
{ |
struct radeon_device *rdev; |
int ret = 0; |
rdev = fence->rdev; |
__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); |
while (1) { |
if (radeon_fence_signaled(fence)) |
break; |
if (time_after_eq(jiffies, fence->timeout)) { |
ret = -EBUSY; |
break; |
} |
if (lazy) |
schedule_timeout(1); |
if (intr && signal_pending(current)) { |
ret = -ERESTARTSYS; |
break; |
} |
} |
__set_current_state(TASK_RUNNING); |
return ret; |
} |
int radeon_fence_wait(struct radeon_fence *fence, bool intr) |
{ |
struct radeon_device *rdev; |
unsigned long cur_jiffies; |
unsigned long timeout; |
bool expired = false; |
216,14 → 185,6 |
if (radeon_fence_signaled(fence)) { |
return 0; |
} |
if (rdev->family >= CHIP_R600) { |
r = r600_fence_wait(fence, intr, 0); |
if (r == -ERESTARTSYS) |
return -EBUSY; |
return r; |
} |
retry: |
cur_jiffies = jiffies; |
timeout = HZ / 100; |
230,11 → 191,11 |
if (time_after(fence->timeout, cur_jiffies)) { |
timeout = fence->timeout - cur_jiffies; |
} |
if (intr) { |
if (interruptible) { |
r = wait_event_interruptible_timeout(rdev->fence_drv.queue, |
radeon_fence_signaled(fence), timeout); |
if (unlikely(r == -ERESTARTSYS)) { |
return -EBUSY; |
return -ERESTART; |
} |
} else { |
r = wait_event_timeout(rdev->fence_drv.queue, |
/drivers/video/drm/radeon/radeon_mode.h |
---|
34,12 → 34,8 |
#include <drm_mode.h> |
#include <drm_edid.h> |
#include <linux/i2c.h> |
#include <linux/i2c-id.h> |
#include <linux/i2c-algo-bit.h> |
#include "radeon_fixed.h" |
struct radeon_device; |
#define to_radeon_crtc(x) container_of(x, struct radeon_crtc, base) |
#define to_radeon_connector(x) container_of(x, struct radeon_connector, base) |
#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) |
127,7 → 123,6 |
#define RADEON_PLL_PREFER_LOW_POST_DIV (1 << 8) |
#define RADEON_PLL_PREFER_HIGH_POST_DIV (1 << 9) |
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10) |
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11) |
struct radeon_pll { |
uint16_t reference_freq; |
174,18 → 169,25 |
struct atom_context *atom_context; |
enum radeon_connector_table connector_table; |
bool mode_config_initialized; |
struct radeon_crtc *crtcs[2]; |
/* DVI-I properties */ |
struct drm_property *coherent_mode_property; |
/* DAC enable load detect */ |
struct drm_property *load_detect_property; |
/* TV standard load detect */ |
struct drm_property *tv_std_property; |
/* legacy TMDS PLL detect */ |
struct drm_property *tmds_pll_property; |
}; |
struct radeon_crtc { |
struct drm_crtc base; |
int crtc_id; |
u16_t lut_r[256], lut_g[256], lut_b[256]; |
bool enabled; |
bool can_tile; |
uint32_t crtc_offset; |
struct radeon_framebuffer *fbdev_fb; |
struct drm_mode_set mode_set; |
// struct drm_gem_object *cursor_bo; |
uint64_t cursor_addr; |
int cursor_width; |
int cursor_height; |
}; |
#define RADEON_USE_RMX 1 |
struct radeon_native_mode { |
/* preferred mode */ |
uint32_t panel_xres, panel_yres; |
197,40 → 199,6 |
uint32_t flags; |
}; |
#define MAX_H_CODE_TIMING_LEN 32 |
#define MAX_V_CODE_TIMING_LEN 32 |
/* need to store these as reading |
back code tables is excessive */ |
struct radeon_tv_regs { |
uint32_t tv_uv_adr; |
uint32_t timing_cntl; |
uint32_t hrestart; |
uint32_t vrestart; |
uint32_t frestart; |
uint16_t h_code_timing[MAX_H_CODE_TIMING_LEN]; |
uint16_t v_code_timing[MAX_V_CODE_TIMING_LEN]; |
}; |
struct radeon_crtc { |
struct drm_crtc base; |
int crtc_id; |
u16 lut_r[256], lut_g[256], lut_b[256]; |
bool enabled; |
bool can_tile; |
uint32_t crtc_offset; |
// struct drm_gem_object *cursor_bo; |
uint64_t cursor_addr; |
int cursor_width; |
int cursor_height; |
uint32_t legacy_display_base_addr; |
uint32_t legacy_cursor_offset; |
enum radeon_rmx_type rmx_type; |
fixed20_12 vsc; |
fixed20_12 hsc; |
struct radeon_native_mode native_mode; |
}; |
struct radeon_encoder_primary_dac { |
/* legacy primary dac */ |
uint32_t ps2_pdac_adj; |
257,13 → 225,7 |
uint32_t ntsc_tvdac_adj; |
uint32_t pal_tvdac_adj; |
int h_pos; |
int v_pos; |
int h_size; |
int supported_tv_stds; |
bool tv_on; |
enum radeon_tv_std tv_std; |
struct radeon_tv_regs tv; |
}; |
struct radeon_encoder_int_tmds { |
282,15 → 244,10 |
struct radeon_native_mode native_mode; |
}; |
struct radeon_encoder_atom_dac { |
enum radeon_tv_std tv_std; |
}; |
struct radeon_encoder { |
struct drm_encoder base; |
uint32_t encoder_id; |
uint32_t devices; |
uint32_t active_device; |
uint32_t flags; |
uint32_t pixel_clock; |
enum radeon_rmx_type rmx_type; |
308,12 → 265,8 |
uint32_t connector_id; |
uint32_t devices; |
struct radeon_i2c_chan *ddc_bus; |
bool use_digital; |
/* we need to mind the EDID between detect |
and get modes due to analog/digital/tvencoder */ |
struct edid *edid; |
int use_digital; |
void *con_priv; |
bool dac_load_detect; |
}; |
struct radeon_framebuffer { |
346,7 → 299,6 |
struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index); |
extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action); |
extern int atombios_get_encoder_mode(struct drm_encoder *encoder); |
extern void radeon_encoder_set_active_device(struct drm_encoder *encoder); |
extern void radeon_crtc_load_lut(struct drm_crtc *crtc); |
extern int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y, |
374,12 → 326,8 |
extern bool radeon_combios_get_clock_info(struct drm_device *dev); |
extern struct radeon_encoder_atom_dig * |
radeon_atombios_get_lvds_info(struct radeon_encoder *encoder); |
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder, |
struct radeon_encoder_int_tmds *tmds); |
bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, |
struct radeon_encoder_int_tmds *tmds); |
bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, |
struct radeon_encoder_int_tmds *tmds); |
extern struct radeon_encoder_int_tmds * |
radeon_atombios_get_tmds_info(struct radeon_encoder *encoder); |
extern struct radeon_encoder_primary_dac * |
radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder); |
extern struct radeon_encoder_tv_dac * |
386,6 → 334,8 |
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder); |
extern struct radeon_encoder_lvds * |
radeon_combios_get_lvds_info(struct radeon_encoder *encoder); |
extern struct radeon_encoder_int_tmds * |
radeon_combios_get_tmds_info(struct radeon_encoder *encoder); |
extern void radeon_combios_get_ext_tmds_info(struct radeon_encoder *encoder); |
extern struct radeon_encoder_tv_dac * |
radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder); |
395,8 → 345,6 |
extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev); |
extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock); |
extern void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev); |
extern void radeon_save_bios_scratch_regs(struct radeon_device *rdev); |
extern void radeon_restore_bios_scratch_regs(struct radeon_device *rdev); |
extern void |
radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc); |
extern void |
434,22 → 382,16 |
void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj); |
void radeon_combios_asic_init(struct drm_device *dev); |
extern int radeon_static_clocks_init(struct drm_device *dev); |
bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode); |
void atom_rv515_force_tv_scaler(struct radeon_device *rdev, struct radeon_crtc *radeon_crtc); |
void radeon_init_disp_bw_legacy(struct drm_device *dev, |
struct drm_display_mode *mode1, |
uint32_t pixel_bytes1, |
struct drm_display_mode *mode2, |
uint32_t pixel_bytes2); |
void radeon_init_disp_bw_avivo(struct drm_device *dev, |
struct drm_display_mode *mode1, |
uint32_t pixel_bytes1, |
struct drm_display_mode *mode2, |
uint32_t pixel_bytes2); |
void radeon_init_disp_bandwidth(struct drm_device *dev); |
/* legacy tv */ |
void radeon_legacy_tv_adjust_crtc_reg(struct drm_encoder *encoder, |
uint32_t *h_total_disp, uint32_t *h_sync_strt_wid, |
uint32_t *v_total_disp, uint32_t *v_sync_strt_wid); |
void radeon_legacy_tv_adjust_pll1(struct drm_encoder *encoder, |
uint32_t *htotal_cntl, uint32_t *ppll_ref_div, |
uint32_t *ppll_div_3, uint32_t *pixclks_cntl); |
void radeon_legacy_tv_adjust_pll2(struct drm_encoder *encoder, |
uint32_t *htotal2_cntl, uint32_t *p2pll_ref_div, |
uint32_t *p2pll_div_0, uint32_t *pixclks_cntl); |
void radeon_legacy_tv_mode_set(struct drm_encoder *encoder, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode); |
#endif |
/drivers/video/drm/radeon/radeon_combios.c |
---|
685,15 → 685,23 |
0x00780000, /* rs480 */ |
}; |
static void radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev, |
struct radeon_encoder_tv_dac *tv_dac) |
static struct radeon_encoder_tv_dac |
*radeon_legacy_get_tv_dac_info_from_table(struct radeon_device *rdev) |
{ |
struct radeon_encoder_tv_dac *tv_dac = NULL; |
tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); |
if (!tv_dac) |
return NULL; |
tv_dac->ps2_tvdac_adj = default_tvdac_adj[rdev->family]; |
if ((rdev->flags & RADEON_IS_MOBILITY) && (rdev->family == CHIP_RV250)) |
tv_dac->ps2_tvdac_adj = 0x00880000; |
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
return; |
return tv_dac; |
} |
struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct |
705,18 → 713,19 |
uint16_t dac_info; |
uint8_t rev, bg, dac; |
struct radeon_encoder_tv_dac *tv_dac = NULL; |
int found = 0; |
tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); |
if (!tv_dac) |
return NULL; |
if (rdev->bios == NULL) |
goto out; |
return radeon_legacy_get_tv_dac_info_from_table(rdev); |
/* first check TV table */ |
dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE); |
if (dac_info) { |
tv_dac = |
kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); |
if (!tv_dac) |
return NULL; |
rev = RBIOS8(dac_info + 0x3); |
if (rev > 4) { |
bg = RBIOS8(dac_info + 0xc) & 0xf; |
730,7 → 739,6 |
bg = RBIOS8(dac_info + 0x10) & 0xf; |
dac = RBIOS8(dac_info + 0x11) & 0xf; |
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
found = 1; |
} else if (rev > 1) { |
bg = RBIOS8(dac_info + 0xc) & 0xf; |
dac = (RBIOS8(dac_info + 0xc) >> 4) & 0xf; |
743,16 → 751,22 |
bg = RBIOS8(dac_info + 0xe) & 0xf; |
dac = (RBIOS8(dac_info + 0xe) >> 4) & 0xf; |
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
found = 1; |
} |
tv_dac->tv_std = radeon_combios_get_tv_info(encoder); |
} |
if (!found) { |
} else { |
/* then check CRT table */ |
dac_info = |
combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE); |
if (dac_info) { |
tv_dac = |
kzalloc(sizeof(struct radeon_encoder_tv_dac), |
GFP_KERNEL); |
if (!tv_dac) |
return NULL; |
rev = RBIOS8(dac_info) & 0x3; |
if (rev < 2) { |
bg = RBIOS8(dac_info + 0x3) & 0xf; |
761,7 → 775,6 |
(bg << 16) | (dac << 20); |
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
found = 1; |
} else { |
bg = RBIOS8(dac_info + 0x4) & 0xf; |
dac = RBIOS8(dac_info + 0x5) & 0xf; |
769,15 → 782,12 |
(bg << 16) | (dac << 20); |
tv_dac->pal_tvdac_adj = tv_dac->ps2_tvdac_adj; |
tv_dac->ntsc_tvdac_adj = tv_dac->ps2_tvdac_adj; |
found = 1; |
} |
} else { |
DRM_INFO("No TV DAC info found in BIOS\n"); |
return radeon_legacy_get_tv_dac_info_from_table(rdev); |
} |
} |
out: |
if (!found) /* fallback to defaults */ |
radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac); |
return tv_dac; |
} |
863,10 → 873,8 |
int tmp, i; |
struct radeon_encoder_lvds *lvds = NULL; |
if (rdev->bios == NULL) { |
lvds = radeon_legacy_get_lvds_info_from_regs(rdev); |
goto out; |
} |
if (rdev->bios == NULL) |
return radeon_legacy_get_lvds_info_from_regs(rdev); |
lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE); |
967,13 → 975,11 |
lvds->native_mode.flags = 0; |
} |
} |
encoder->native_mode = lvds->native_mode; |
} else { |
DRM_INFO("No panel info found in BIOS\n"); |
lvds = radeon_legacy_get_lvds_info_from_regs(rdev); |
return radeon_legacy_get_lvds_info_from_regs(rdev); |
} |
out: |
if (lvds) |
encoder->native_mode = lvds->native_mode; |
return lvds; |
} |
998,13 → 1004,17 |
{{15000, 0xb0155}, {0xffffffff, 0xb01cb}, {0, 0}, {0, 0}}, /* CHIP_RS480 */ |
}; |
bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder, |
struct radeon_encoder_int_tmds *tmds) |
static struct radeon_encoder_int_tmds |
*radeon_legacy_get_tmds_info_from_table(struct radeon_device *rdev) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct radeon_device *rdev = dev->dev_private; |
int i; |
struct radeon_encoder_int_tmds *tmds = NULL; |
tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); |
if (!tmds) |
return NULL; |
for (i = 0; i < 4; i++) { |
tmds->tmds_pll[i].value = |
default_tmds_pll[rdev->family][i].value; |
1011,11 → 1021,12 |
tmds->tmds_pll[i].freq = default_tmds_pll[rdev->family][i].freq; |
} |
return true; |
return tmds; |
} |
bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder, |
struct radeon_encoder_int_tmds *tmds) |
struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct |
radeon_encoder |
*encoder) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct radeon_device *rdev = dev->dev_private; |
1022,14 → 1033,20 |
uint16_t tmds_info; |
int i, n; |
uint8_t ver; |
struct radeon_encoder_int_tmds *tmds = NULL; |
if (rdev->bios == NULL) |
return false; |
return radeon_legacy_get_tmds_info_from_table(rdev); |
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE); |
if (tmds_info) { |
tmds = |
kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); |
if (!tmds) |
return NULL; |
ver = RBIOS8(tmds_info); |
DRM_INFO("DFP table revision: %d\n", ver); |
if (ver == 3) { |
1066,23 → 1083,6 |
} |
} else |
DRM_INFO("No TMDS info found in BIOS\n"); |
return true; |
} |
struct radeon_encoder_int_tmds *radeon_combios_get_tmds_info(struct radeon_encoder *encoder) |
{ |
struct radeon_encoder_int_tmds *tmds = NULL; |
bool ret; |
tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); |
if (!tmds) |
return NULL; |
ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); |
if (ret == false) |
radeon_legacy_get_tmds_info_from_table(encoder, tmds); |
return tmds; |
} |
/drivers/video/drm/radeon/radeon_legacy_crtc.c |
---|
23,178 → 23,12 |
* Authors: Dave Airlie |
* Alex Deucher |
*/ |
#include <drm/drmP.h> |
#include <drm/drm_crtc_helper.h> |
#include <drm/radeon_drm.h> |
#include <drmP.h> |
#include <drm_crtc_helper.h> |
#include "radeon_drm.h" |
#include "radeon_fixed.h" |
#include "radeon.h" |
#include "atom.h" |
/*
 * Program the RMX (scaler) block of a legacy (pre-AVIVO) Radeon CRTC.
 *
 * Packs the timing fields of @mode into the FP_* register layout and,
 * depending on radeon_crtc->rmx_type, either stretches (RMX_FULL/ASPECT),
 * centers (RMX_CENTER) or passes through (RMX_OFF) the image relative to
 * the panel's native mode.  Writes the FP_*_STRETCH, CRTC_MORE_CNTL and
 * FP_* sync/total registers; no value is returned.
 *
 * NOTE(review): only registers of the first RMX unit are written here —
 * presumably this is only valid on crtc 0; confirm against the callers.
 */
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
				       struct drm_display_mode *mode,
				       struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = crtc->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
	int xres = mode->hdisplay;
	int yres = mode->vdisplay;
	bool hscale = true, vscale = true;
	int hsync_wid;
	int vsync_wid;
	int hsync_start;
	int blank_width;
	u32 scale, inc, crtc_more_cntl;
	u32 fp_horz_stretch, fp_vert_stretch, fp_horz_vert_active;
	u32 fp_h_sync_strt_wid, fp_crtc_h_total_disp;
	u32 fp_v_sync_strt_wid, fp_crtc_v_total_disp;
	struct radeon_native_mode *native_mode = &radeon_crtc->native_mode;

	/* Start from the current register contents, keeping only the
	 * reserved / auto-ratio bits; everything else is rebuilt below. */
	fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) &
		(RADEON_VERT_STRETCH_RESERVED |
		 RADEON_VERT_AUTO_RATIO_INC);
	fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) &
		(RADEON_HORZ_FP_LOOP_STRETCH |
		 RADEON_HORZ_AUTO_RATIO_INC);
	crtc_more_cntl = 0;
	if ((rdev->family == CHIP_RS100) ||
	    (rdev->family == CHIP_RS200)) {
		/* This is to workaround the asic bug for RMX, some versions
		   of BIOS dosen't have this register initialized correctly. */
		crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN;
	}
	/* Horizontal timing: totals/display width are programmed in units
	 * of 8 pixels (character clocks), hence the /8 below. */
	fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff)
				| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
	hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
	if (!hsync_wid)
		hsync_wid = 1;	/* hardware requires a non-zero sync width */
	hsync_start = mode->crtc_hsync_start - 8;
	fp_h_sync_strt_wid = ((hsync_start & 0x1fff)
			      | ((hsync_wid & 0x3f) << 16)
			      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
				 ? RADEON_CRTC_H_SYNC_POL
				 : 0));
	/* Vertical timing: programmed in lines (no /8 scaling). */
	fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff)
				| ((mode->crtc_vdisplay - 1) << 16));
	vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
	if (!vsync_wid)
		vsync_wid = 1;	/* same non-zero requirement as hsync */
	fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff)
			      | ((vsync_wid & 0x1f) << 16)
			      | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
				 ? RADEON_CRTC_V_SYNC_POL
				 : 0));
	fp_horz_vert_active = 0;
	/* Decide whether each axis actually needs scaling: clamp the
	 * requested size to the panel's native size, and disable scaling
	 * on an axis that already matches (or when no native mode is known). */
	if (native_mode->panel_xres == 0 ||
	    native_mode->panel_yres == 0) {
		hscale = false;
		vscale = false;
	} else {
		if (xres > native_mode->panel_xres)
			xres = native_mode->panel_xres;
		if (yres > native_mode->panel_yres)
			yres = native_mode->panel_yres;
		if (xres == native_mode->panel_xres)
			hscale = false;
		if (yres == native_mode->panel_yres)
			vscale = false;
	}
	switch (radeon_crtc->rmx_type) {
	case RMX_FULL:
	case RMX_ASPECT:
		/* Stretch the source up to the panel; the ratio field is a
		 * fixed-point fraction of RADEON_*_STRETCH_RATIO_MAX. */
		if (!hscale)
			fp_horz_stretch |= ((xres/8-1) << 16);
		else {
			inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0;
			scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX)
				/ native_mode->panel_xres + 1;
			fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) |
					    RADEON_HORZ_STRETCH_BLEND |
					    RADEON_HORZ_STRETCH_ENABLE |
					    ((native_mode->panel_xres/8-1) << 16));
		}
		if (!vscale)
			fp_vert_stretch |= ((yres-1) << 12);
		else {
			inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0;
			scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX)
				/ native_mode->panel_yres + 1;
			fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) |
					    RADEON_VERT_STRETCH_ENABLE |
					    RADEON_VERT_STRETCH_BLEND |
					    ((native_mode->panel_yres-1) << 12));
		}
		break;
	case RMX_CENTER:
		/* No stretching: let the hardware auto-center, and rebuild
		 * the h/v totals from the blanking interval instead of the
		 * full mode totals. */
		fp_horz_stretch |= ((xres/8-1) << 16);
		fp_vert_stretch |= ((yres-1) << 12);
		crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN |
				   RADEON_CRTC_AUTO_VERT_CENTER_EN);
		blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8;
		if (blank_width > 110)
			blank_width = 110;	/* hardware limit on blank width field */
		fp_crtc_h_total_disp = (((blank_width) & 0x3ff)
					| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16));
		hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8;
		if (!hsync_wid)
			hsync_wid = 1;
		fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff)
				      | ((hsync_wid & 0x3f) << 16)
				      | ((mode->flags & DRM_MODE_FLAG_NHSYNC)
					 ? RADEON_CRTC_H_SYNC_POL
					 : 0));
		fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff)
					| ((mode->crtc_vdisplay - 1) << 16));
		vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start;
		if (!vsync_wid)
			vsync_wid = 1;
		fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff)
				      | ((vsync_wid & 0x1f) << 16)
				      | ((mode->flags & DRM_MODE_FLAG_NVSYNC)
					 ? RADEON_CRTC_V_SYNC_POL
					 : 0)));
		/* active area = the panel's native size */
		fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) |
				       (((native_mode->panel_xres / 8) & 0x1ff) << 16));
		break;
	case RMX_OFF:
	default:
		/* Scaler bypass: program the (clamped) source size only. */
		fp_horz_stretch |= ((xres/8-1) << 16);
		fp_vert_stretch |= ((yres-1) << 12);
		break;
	}
	/* Commit everything to the hardware in one burst. */
	WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch);
	WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch);
	WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl);
	WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active);
	WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid);
	WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid);
	WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp);
	WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp);
}
void radeon_restore_common_regs(struct drm_device *dev) |
{ |
/* don't need this yet */ |
311,13 → 145,10 |
RADEON_CRTC_DISP_REQ_EN_B)); |
WREG32_P(RADEON_CRTC_EXT_CNTL, 0, ~mask); |
} |
// drm_vblank_post_modeset(dev, radeon_crtc->crtc_id); |
radeon_crtc_load_lut(crtc); |
break; |
case DRM_MODE_DPMS_STANDBY: |
case DRM_MODE_DPMS_SUSPEND: |
case DRM_MODE_DPMS_OFF: |
// drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id); |
if (radeon_crtc->crtc_id) |
WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask); |
else { |
327,7 → 158,11 |
} |
break; |
} |
if (mode != DRM_MODE_DPMS_OFF) { |
radeon_crtc_load_lut(crtc); |
} |
} |
/* properly set crtc bpp when using atombios */ |
void radeon_legacy_atom_set_surface(struct drm_crtc *crtc) |
341,9 → 176,6 |
uint32_t crtc_pitch; |
switch (crtc->fb->bits_per_pixel) { |
case 8: |
format = 2; |
break; |
case 15: /* 555 */ |
format = 3; |
break; |
403,44 → 235,16 |
uint64_t base; |
uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0; |
uint32_t crtc_pitch, pitch_pixels; |
uint32_t tiling_flags; |
int format; |
uint32_t gen_cntl_reg, gen_cntl_val; |
DRM_DEBUG("\n"); |
radeon_fb = to_radeon_framebuffer(crtc->fb); |
switch (crtc->fb->bits_per_pixel) { |
case 8: |
format = 2; |
break; |
case 15: /* 555 */ |
format = 3; |
break; |
case 16: /* 565 */ |
format = 4; |
break; |
case 24: /* RGB */ |
format = 5; |
break; |
case 32: /* xRGB */ |
format = 6; |
break; |
default: |
return false; |
} |
obj = radeon_fb->obj; |
// if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) { |
// return -EINVAL; |
// } |
/* if scanout was in GTT this really wouldn't work */ |
/* crtc offset is from display base addr not FB location */ |
radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location; |
base -= radeon_crtc->legacy_display_base_addr; |
crtc_offset = (u32)base; |
crtc_offset_cntl = 0; |
pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8); |
449,12 → 253,8 |
(crtc->fb->bits_per_pixel * 8)); |
crtc_pitch |= crtc_pitch << 16; |
// radeon_object_get_tiling_flags(obj->driver_private, |
// &tiling_flags, NULL); |
if (tiling_flags & RADEON_TILING_MICRO) |
DRM_ERROR("trying to scanout microtiled buffer\n"); |
if (tiling_flags & RADEON_TILING_MACRO) { |
/* TODO tiling */ |
if (0) { |
if (ASIC_IS_R300(rdev)) |
crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | |
R300_CRTC_MICRO_TILE_BUFFER_DIS | |
470,13 → 270,15 |
crtc_offset_cntl &= ~RADEON_CRTC_TILE_EN; |
} |
if (tiling_flags & RADEON_TILING_MACRO) { |
/* TODO more tiling */ |
if (0) { |
if (ASIC_IS_R300(rdev)) { |
crtc_tile_x0_y0 = x | (y << 16); |
base &= ~0x7ff; |
} else { |
int byteshift = crtc->fb->bits_per_pixel >> 4; |
int tile_addr = (((y >> 3) * pitch_pixels + x) >> (8 - byteshift)) << 11; |
int tile_addr = (((y >> 3) * crtc->fb->width + x) >> (8 - byteshift)) << 11; |
base += tile_addr + ((x << byteshift) % 256) + ((y % 8) << 8); |
crtc_offset_cntl |= (y % 16); |
} |
483,9 → 285,6 |
} else { |
int offset = y * pitch_pixels + x; |
switch (crtc->fb->bits_per_pixel) { |
case 8: |
offset *= 1; |
break; |
case 15: |
case 16: |
offset *= 2; |
504,19 → 303,11 |
base &= ~7; |
if (radeon_crtc->crtc_id == 1) |
gen_cntl_reg = RADEON_CRTC2_GEN_CNTL; |
else |
gen_cntl_reg = RADEON_CRTC_GEN_CNTL; |
/* update sarea TODO */ |
gen_cntl_val = RREG32(gen_cntl_reg); |
gen_cntl_val &= ~(0xf << 8); |
gen_cntl_val |= (format << 8); |
WREG32(gen_cntl_reg, gen_cntl_val); |
crtc_offset = (u32)base; |
WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, radeon_crtc->legacy_display_base_addr); |
WREG32(RADEON_DISPLAY_BASE_ADDR + radeon_crtc->crtc_offset, rdev->mc.vram_location); |
if (ASIC_IS_R300(rdev)) { |
if (radeon_crtc->crtc_id) |
540,7 → 331,6 |
struct drm_device *dev = crtc->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
struct drm_encoder *encoder; |
int format; |
int hsync_start; |
int hsync_wid; |
549,24 → 339,10 |
uint32_t crtc_h_sync_strt_wid; |
uint32_t crtc_v_total_disp; |
uint32_t crtc_v_sync_strt_wid; |
bool is_tv = false; |
DRM_DEBUG("\n"); |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
if (encoder->crtc == crtc) { |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) { |
is_tv = true; |
DRM_INFO("crtc %d is connected to a TV\n", radeon_crtc->crtc_id); |
break; |
} |
} |
} |
switch (crtc->fb->bits_per_pixel) { |
case 8: |
format = 2; |
break; |
case 15: /* 555 */ |
format = 3; |
break; |
696,11 → 472,6 |
WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); |
} |
if (is_tv) |
radeon_legacy_tv_adjust_crtc_reg(encoder, &crtc_h_total_disp, |
&crtc_h_sync_strt_wid, &crtc_v_total_disp, |
&crtc_v_sync_strt_wid); |
WREG32(RADEON_CRTC_H_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_h_total_disp); |
WREG32(RADEON_CRTC_H_SYNC_STRT_WID + radeon_crtc->crtc_offset, crtc_h_sync_strt_wid); |
WREG32(RADEON_CRTC_V_TOTAL_DISP + radeon_crtc->crtc_offset, crtc_v_total_disp); |
727,7 → 498,7 |
uint32_t pll_ref_div = 0; |
uint32_t pll_fb_post_div = 0; |
uint32_t htotal_cntl = 0; |
bool is_tv = false; |
struct radeon_pll *pll; |
struct { |
762,13 → 533,6 |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
if (encoder->crtc == crtc) { |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
if (radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT) { |
is_tv = true; |
break; |
} |
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) |
pll_flags |= RADEON_PLL_NO_ODD_POST_DIV; |
if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) { |
832,12 → 596,6 |
~(RADEON_PIX2CLK_SRC_SEL_MASK)) | |
RADEON_PIX2CLK_SRC_SEL_P2PLLCLK); |
if (is_tv) { |
radeon_legacy_tv_adjust_pll2(encoder, &htotal_cntl, |
&pll_ref_div, &pll_fb_post_div, |
&pixclks_cntl); |
} |
WREG32_PLL_P(RADEON_PIXCLKS_CNTL, |
RADEON_PIX2CLK_SRC_SEL_CPUCLK, |
~(RADEON_PIX2CLK_SRC_SEL_MASK)); |
892,15 → 650,6 |
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); |
} else { |
uint32_t pixclks_cntl; |
if (is_tv) { |
pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); |
radeon_legacy_tv_adjust_pll1(encoder, &htotal_cntl, &pll_ref_div, |
&pll_fb_post_div, &pixclks_cntl); |
} |
if (rdev->flags & RADEON_IS_MOBILITY) { |
/* A temporal workaround for the occational blanking on certain laptop panels. |
This appears to related to the PLL divider registers (fail to lock?). |
995,8 → 744,6 |
RADEON_VCLK_SRC_SEL_PPLLCLK, |
~(RADEON_VCLK_SRC_SEL_MASK)); |
if (is_tv) |
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); |
} |
} |
1004,8 → 751,6 |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) |
return false; |
return true; |
} |
1014,25 → 759,16 |
struct drm_display_mode *adjusted_mode, |
int x, int y, struct drm_framebuffer *old_fb) |
{ |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); |
struct drm_device *dev = crtc->dev; |
struct radeon_device *rdev = dev->dev_private; |
DRM_DEBUG("\n"); |
/* TODO TV */ |
radeon_crtc_set_base(crtc, x, y, old_fb); |
radeon_set_crtc_timing(crtc, adjusted_mode); |
radeon_set_pll(crtc, adjusted_mode); |
radeon_bandwidth_update(rdev); |
if (radeon_crtc->crtc_id == 0) { |
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); |
} else { |
if (radeon_crtc->rmx_type != RMX_OFF) { |
/* FIXME: only first crtc has rmx what should we |
* do ? |
*/ |
DRM_ERROR("Mode need scaling but only first crtc can do that.\n"); |
} |
} |
radeon_init_disp_bandwidth(crtc->dev); |
return 0; |
} |
1063,3 → 799,478 |
radeon_crtc->crtc_offset = RADEON_CRTC2_H_TOTAL_DISP - RADEON_CRTC_H_TOTAL_DISP; |
drm_crtc_helper_add(&radeon_crtc->base, &legacy_helper_funcs); |
} |
/*
 * Compute and program the display-FIFO watermarks (GRPH_BUFFER_CNTL /
 * GRPH2_BUFFER_CNTL) for legacy (pre-AVIVO) Radeons.
 *
 * @mode1/@pixel_bytes1 and @mode2/@pixel_bytes2 describe the modes active
 * on CRTC1 and CRTC2 respectively; either may be NULL/unused.  The routine
 * estimates peak display bandwidth versus available memory bandwidth using
 * 20.12 fixed-point arithmetic (fixed20_12 / rfixed_* helpers), derives the
 * memory-controller latency from the DRAM timing registers, and converts
 * that latency into a "critical point" FIFO threshold per CRTC.
 *
 * Only warns (DRM_ERROR) when the mode likely exceeds memory bandwidth;
 * it never fails.
 */
void radeon_init_disp_bw_legacy(struct drm_device *dev,
				struct drm_display_mode *mode1,
				uint32_t pixel_bytes1,
				struct drm_display_mode *mode2,
				uint32_t pixel_bytes2)
{
	struct radeon_device *rdev = dev->dev_private;
	fixed20_12 trcd_ff, trp_ff, tras_ff, trbs_ff, tcas_ff;
	fixed20_12 sclk_ff, mclk_ff, sclk_eff_ff, sclk_delay_ff;
	fixed20_12 peak_disp_bw, mem_bw, pix_clk, pix_clk2, temp_ff, crit_point_ff;
	uint32_t temp, data, mem_trcd, mem_trp, mem_tras;
	/* tCAS lookup tables, indexed by the 3-bit CAS-latency field of
	 * MEM_SDRAM_MODE_REG.  NOTE(review): only 7 initializers are given
	 * for 8 entries here — index 7 reads the implicitly zero-initialized
	 * last element; confirm that is intentional for these parts. */
	fixed20_12 memtcas_ff[8] = {
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init(0),
	};
	fixed20_12 memtcas_rs480_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(0),
		fixed_init_half(1),
		fixed_init_half(2),
		fixed_init_half(3),
	};
	fixed20_12 memtcas2_ff[8] = {
		fixed_init(0),
		fixed_init(1),
		fixed_init(2),
		fixed_init(3),
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
	};
	/* tRBS (read burst spacing) tables; the r4xx family uses larger values. */
	fixed20_12 memtrbs[8] = {
		fixed_init(1),
		fixed_init_half(1),
		fixed_init(2),
		fixed_init_half(2),
		fixed_init(3),
		fixed_init_half(3),
		fixed_init(4),
		fixed_init_half(4)
	};
	fixed20_12 memtrbs_r4xx[8] = {
		fixed_init(4),
		fixed_init(5),
		fixed_init(6),
		fixed_init(7),
		fixed_init(8),
		fixed_init(9),
		fixed_init(10),
		fixed_init(11)
	};
	fixed20_12 min_mem_eff;
	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
	fixed20_12 cur_latency_mclk, cur_latency_sclk;
	fixed20_12 disp_latency, disp_latency_overhead, disp_drain_rate,
		disp_drain_rate2, read_return_rate;
	fixed20_12 time_disp1_drop_priority;
	int c;
	int cur_size = 16;       /* in octawords */
	int critical_point = 0, critical_point2;
	/* uint32_t read_return_rate, time_disp1_drop_priority; */
	int stop_req, max_stop_req;

	/* Assumed minimum memory efficiency; rfixed_const_8(0) presumably
	 * encodes 0.8 — TODO confirm against radeon_fixed.h. */
	min_mem_eff.full = rfixed_const_8(0);
	/* get modes */
	if ((rdev->disp_priority == 2) && ASIC_IS_R300(rdev)) {
		/* High display priority on R3xx: zero the per-CRTC initial
		 * latency for every enabled display requester. */
		uint32_t mc_init_misc_lat_timer = RREG32(R300_MC_INIT_MISC_LAT_TIMER);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP1R_INIT_LAT_MASK << R300_MC_DISP1R_INIT_LAT_SHIFT);
		mc_init_misc_lat_timer &= ~(R300_MC_DISP0R_INIT_LAT_MASK << R300_MC_DISP0R_INIT_LAT_SHIFT);
		/* check crtc enables */
		if (mode2)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP1R_INIT_LAT_SHIFT);
		if (mode1)
			mc_init_misc_lat_timer |= (1 << R300_MC_DISP0R_INIT_LAT_SHIFT);
		WREG32(R300_MC_INIT_MISC_LAT_TIMER, mc_init_misc_lat_timer);
	}
	/*
	 * determine is there is enough bw for current mode
	 */
	/* Clocks are stored in 10 kHz units; /100 converts to MHz. */
	mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
	temp_ff.full = rfixed_const(100);
	mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
	sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
	sclk_ff.full = rfixed_div(sclk_ff, temp_ff);

	/* Theoretical memory bandwidth: bus bytes per clock, doubled for DDR. */
	temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
	temp_ff.full = rfixed_const(temp);
	mem_bw.full = rfixed_mul(mclk_ff, temp_ff);

	/* Sum the pixel-fetch bandwidth of both active CRTCs. */
	pix_clk.full = 0;
	pix_clk2.full = 0;
	peak_disp_bw.full = 0;
	if (mode1) {
		temp_ff.full = rfixed_const(1000);
		pix_clk.full = rfixed_const(mode1->clock); /* convert to fixed point */
		pix_clk.full = rfixed_div(pix_clk, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes1);
		peak_disp_bw.full += rfixed_mul(pix_clk, temp_ff);
	}
	if (mode2) {
		temp_ff.full = rfixed_const(1000);
		pix_clk2.full = rfixed_const(mode2->clock); /* convert to fixed point */
		pix_clk2.full = rfixed_div(pix_clk2, temp_ff);
		temp_ff.full = rfixed_const(pixel_bytes2);
		peak_disp_bw.full += rfixed_mul(pix_clk2, temp_ff);
	}

	mem_bw.full = rfixed_mul(mem_bw, min_mem_eff);
	if (peak_disp_bw.full >= mem_bw.full) {
		DRM_ERROR("You may not have enough display bandwidth for current mode\n"
			  "If you have flickering problem, try to lower resolution, refresh rate, or color depth\n");
	}

	/* Get values from the EXT_MEM_CNTL register...converting its contents. */
	/* Field layout of the DRAM timing register differs per family. */
	temp = RREG32(RADEON_MEM_TIMING_CNTL);
	if ((rdev->family == CHIP_RV100) || (rdev->flags & RADEON_IS_IGP)) { /* RV100, M6, IGPs */
		mem_trcd = ((temp >> 2) & 0x3) + 1;
		mem_trp = ((temp & 0x3)) + 1;
		mem_tras = ((temp & 0x70) >> 4) + 1;
	} else if (rdev->family == CHIP_R300 ||
		   rdev->family == CHIP_R350) { /* r300, r350 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 11) & 0xf) + 4;
	} else if (rdev->family == CHIP_RV350 ||
		   rdev->family <= CHIP_RV380) {
		/* rv3x0 */
		mem_trcd = (temp & 0x7) + 3;
		mem_trp = ((temp >> 8) & 0x7) + 3;
		mem_tras = ((temp >> 11) & 0xf) + 6;
	} else if (rdev->family == CHIP_R420 ||
		   rdev->family == CHIP_R423 ||
		   rdev->family == CHIP_RV410) {
		/* r4xx */
		mem_trcd = (temp & 0xf) + 3;
		if (mem_trcd > 15)
			mem_trcd = 15;
		mem_trp = ((temp >> 8) & 0xf) + 3;
		if (mem_trp > 15)
			mem_trp = 15;
		mem_tras = ((temp >> 12) & 0x1f) + 6;
		if (mem_tras > 31)
			mem_tras = 31;
	} else { /* RV200, R200 */
		mem_trcd = (temp & 0x7) + 1;
		mem_trp = ((temp >> 8) & 0x7) + 1;
		mem_tras = ((temp >> 12) & 0xf) + 4;
	}
	/* convert to FF */
	trcd_ff.full = rfixed_const(mem_trcd);
	trp_ff.full = rfixed_const(mem_trp);
	tras_ff.full = rfixed_const(mem_tras);

	/* Get values from the MEM_SDRAM_MODE_REG register...converting its */
	temp = RREG32(RADEON_MEM_SDRAM_MODE_REG);
	data = (temp & (7 << 20)) >> 20;	/* 3-bit CAS latency field */
	if ((rdev->family == CHIP_RV100) || rdev->flags & RADEON_IS_IGP) {
		if (rdev->family == CHIP_RS480) /* don't think rs400 */
			tcas_ff = memtcas_rs480_ff[data];
		else
			tcas_ff = memtcas_ff[data];
	} else
		tcas_ff = memtcas2_ff[data];

	if (rdev->family == CHIP_RS400 ||
	    rdev->family == CHIP_RS480) {
		/* extra cas latency stored in bits 23-25 0-4 clocks */
		data = (temp >> 23) & 0x7;
		if (data < 5)
			tcas_ff.full += rfixed_const(data);
	}

	if (ASIC_IS_R300(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
		/* on the R300, Tcas is included in Trbs.
		 */
		temp = RREG32(RADEON_MEM_CNTL);
		data = (R300_MEM_NUM_CHANNELS_MASK & temp);
		if (data == 1) {
			/* Single channel: read RBS position from whichever
			 * channel (CD or AB) is actually in use. */
			if (R300_MEM_USE_CD_CH_ONLY & temp) {
				temp = RREG32(R300_MC_IND_INDEX);
				temp &= ~R300_MC_IND_ADDR_MASK;
				temp |= R300_MC_READ_CNTL_CD_mcind;
				WREG32(R300_MC_IND_INDEX, temp);
				temp = RREG32(R300_MC_IND_DATA);
				data = (R300_MEM_RBS_POSITION_C_MASK & temp);
			} else {
				temp = RREG32(R300_MC_READ_CNTL_AB);
				data = (R300_MEM_RBS_POSITION_A_MASK & temp);
			}
		} else {
			temp = RREG32(R300_MC_READ_CNTL_AB);
			data = (R300_MEM_RBS_POSITION_A_MASK & temp);
		}
		if (rdev->family == CHIP_RV410 ||
		    rdev->family == CHIP_R420 ||
		    rdev->family == CHIP_R423)
			trbs_ff = memtrbs_r4xx[data];
		else
			trbs_ff = memtrbs[data];
		tcas_ff.full += trbs_ff.full;
	}

	sclk_eff_ff.full = sclk_ff.full;

	// if (rdev->flags & RADEON_IS_AGP) {
	//  fixed20_12 agpmode_ff;
	//  agpmode_ff.full = rfixed_const(radeon_agpmode);
	//  temp_ff.full = rfixed_const_666(16);
	//  sclk_eff_ff.full -= rfixed_mul(agpmode_ff, temp_ff);
	// }
	/* TODO PCIE lanes may affect this - agpmode == 16?? */

	/* Empirical SCLK-cycle delay constants per chip/memory configuration. */
	if (ASIC_IS_R300(rdev)) {
		sclk_delay_ff.full = rfixed_const(250);
	} else {
		if ((rdev->family == CHIP_RV100) ||
		    rdev->flags & RADEON_IS_IGP) {
			if (rdev->mc.vram_is_ddr)
				sclk_delay_ff.full = rfixed_const(41);
			else
				sclk_delay_ff.full = rfixed_const(33);
		} else {
			if (rdev->mc.vram_width == 128)
				sclk_delay_ff.full = rfixed_const(57);
			else
				sclk_delay_ff.full = rfixed_const(41);
		}
	}

	mc_latency_sclk.full = rfixed_div(sclk_delay_ff, sclk_eff_ff);

	/* k1/c are empirical correction terms chosen by memory type/width. */
	if (rdev->mc.vram_is_ddr) {
		if (rdev->mc.vram_width == 32) {
			k1.full = rfixed_const(40);
			c = 3;
		} else {
			k1.full = rfixed_const(20);
			c = 1;
		}
	} else {
		k1.full = rfixed_const(40);
		c = 3;
	}

	/* MC latency in MCLKs: 2*tRCD + c*tCAS + 4*(tRAS + tRP) + k1,
	 * then converted to time and given one extra SCLK of slack. */
	temp_ff.full = rfixed_const(2);
	mc_latency_mclk.full = rfixed_mul(trcd_ff, temp_ff);
	temp_ff.full = rfixed_const(c);
	mc_latency_mclk.full += rfixed_mul(tcas_ff, temp_ff);
	temp_ff.full = rfixed_const(4);
	mc_latency_mclk.full += rfixed_mul(tras_ff, temp_ff);
	mc_latency_mclk.full += rfixed_mul(trp_ff, temp_ff);
	mc_latency_mclk.full += k1.full;

	mc_latency_mclk.full = rfixed_div(mc_latency_mclk, mclk_ff);
	mc_latency_mclk.full += rfixed_div(temp_ff, sclk_eff_ff);

	/*
	  HW cursor time assuming worst case of full size colour cursor.
	*/
	temp_ff.full = rfixed_const((2 * (cur_size - (rdev->mc.vram_is_ddr + 1))));
	temp_ff.full += trcd_ff.full;
	if (temp_ff.full < tras_ff.full)
		temp_ff.full = tras_ff.full;
	cur_latency_mclk.full = rfixed_div(temp_ff, mclk_ff);

	temp_ff.full = rfixed_const(cur_size);
	cur_latency_sclk.full = rfixed_div(temp_ff, sclk_eff_ff);
	/*
	  Find the total latency for the display data.
	*/
	disp_latency_overhead.full = rfixed_const(80);
	disp_latency_overhead.full = rfixed_div(disp_latency_overhead, sclk_ff);
	mc_latency_mclk.full += disp_latency_overhead.full + cur_latency_mclk.full;
	mc_latency_sclk.full += disp_latency_overhead.full + cur_latency_sclk.full;

	/* Worst case of the two clock domains governs the watermark. */
	if (mc_latency_mclk.full > mc_latency_sclk.full)
		disp_latency.full = mc_latency_mclk.full;
	else
		disp_latency.full = mc_latency_sclk.full;

	/* setup Max GRPH_STOP_REQ default value */
	if (ASIC_IS_RV100(rdev))
		max_stop_req = 0x5c;
	else
		max_stop_req = 0x7c;

	if (mode1) {
		/*  CRTC1
		    Set GRPH_BUFFER_CNTL register using h/w defined optimal values.
		    GRPH_STOP_REQ <= MIN[ 0x7C, (CRTC_H_DISP + 1) * (bit depth) / 0x10 ]
		*/
		stop_req = mode1->hdisplay * pixel_bytes1 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = rfixed_const((16/pixel_bytes1));
		disp_drain_rate.full = rfixed_div(pix_clk, temp_ff);

		/*
		  Find the critical point of the display buffer.
		*/
		crit_point_ff.full = rfixed_mul(disp_drain_rate, disp_latency);
		crit_point_ff.full += rfixed_const_half(0);	/* round to nearest */

		critical_point = rfixed_trunc(crit_point_ff);

		if (rdev->disp_priority == 2) {
			critical_point = 0;	/* always high priority */
		}

		/*
		  The critical point should never be above max_stop_req-4.  Setting
		  GRPH_CRITICAL_CNTL = 0 will thus force high priority all the time.
		*/
		if (max_stop_req - critical_point < 4)
			critical_point = 0;

		if (critical_point == 0 && mode2 && rdev->family == CHIP_R300) {
			/* some R300 cards have problem with this set to 0, when CRTC2 is enabled.*/
			critical_point = 0x10;
		}

		temp = RREG32(RADEON_GRPH_BUFFER_CNTL);
		temp &= ~(RADEON_GRPH_STOP_REQ_MASK);
		temp |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		temp &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;	/* R350: start threshold below stop */
		}
		temp |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		temp |= RADEON_GRPH_BUFFER_SIZE;
		temp &= ~(RADEON_GRPH_CRITICAL_CNTL |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);
		/*
		  Write the result into the register.
		*/
		WREG32(RADEON_GRPH_BUFFER_CNTL, ((temp & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						 (critical_point << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

#if 0
		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
			/* attempt to program RS400 disp regs correctly ??? */
			temp = RREG32(RS400_DISP1_REG_CNTL);
			temp &= ~(RS400_DISP1_START_REQ_LEVEL_MASK |
				  RS400_DISP1_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP1_REQ_CNTL1, (temp |
						       (critical_point << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DMIF_MEM_CNTL1);
			temp &= ~(RS400_DISP1_CRITICAL_POINT_START_MASK |
				  RS400_DISP1_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DMIF_MEM_CNTL1, (temp |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_START_SHIFT) |
						      (critical_point << RS400_DISP1_CRITICAL_POINT_STOP_SHIFT)));
		}
#endif

		DRM_DEBUG("GRPH_BUFFER_CNTL from to %x\n",
			  /*      (unsigned int)info->SavedReg->grph_buffer_cntl, */
			  (unsigned int)RREG32(RADEON_GRPH_BUFFER_CNTL));
	}

	if (mode2) {
		u32 grph2_cntl;
		stop_req = mode2->hdisplay * pixel_bytes2 / 16;

		if (stop_req > max_stop_req)
			stop_req = max_stop_req;

		/*
		  Find the drain rate of the display buffer.
		*/
		temp_ff.full = rfixed_const((16/pixel_bytes2));
		disp_drain_rate2.full = rfixed_div(pix_clk2, temp_ff);

		grph2_cntl = RREG32(RADEON_GRPH2_BUFFER_CNTL);
		grph2_cntl &= ~(RADEON_GRPH_STOP_REQ_MASK);
		grph2_cntl |= (stop_req << RADEON_GRPH_STOP_REQ_SHIFT);
		grph2_cntl &= ~(RADEON_GRPH_START_REQ_MASK);
		if ((rdev->family == CHIP_R350) &&
		    (stop_req > 0x15)) {
			stop_req -= 0x10;
		}
		grph2_cntl |= (stop_req << RADEON_GRPH_START_REQ_SHIFT);
		grph2_cntl |= RADEON_GRPH_BUFFER_SIZE;
		grph2_cntl &= ~(RADEON_GRPH_CRITICAL_CNTL |
			  RADEON_GRPH_CRITICAL_AT_SOF |
			  RADEON_GRPH_STOP_CNTL);

		if ((rdev->family == CHIP_RS100) ||
		    (rdev->family == CHIP_RS200))
			critical_point2 = 0;
		else {
			/* CRTC2's critical point must also absorb the time
			 * during which CRTC1 has fetch priority. */
			temp = (rdev->mc.vram_width * rdev->mc.vram_is_ddr + 1)/128;
			temp_ff.full = rfixed_const(temp);
			temp_ff.full = rfixed_mul(mclk_ff, temp_ff);
			if (sclk_ff.full < temp_ff.full)
				temp_ff.full = sclk_ff.full;

			read_return_rate.full = temp_ff.full;

			if (mode1) {
				temp_ff.full = read_return_rate.full - disp_drain_rate.full;
				time_disp1_drop_priority.full = rfixed_div(crit_point_ff, temp_ff);
			} else {
				time_disp1_drop_priority.full = 0;
			}
			crit_point_ff.full = disp_latency.full + time_disp1_drop_priority.full + disp_latency.full;
			crit_point_ff.full = rfixed_mul(crit_point_ff, disp_drain_rate2);
			crit_point_ff.full += rfixed_const_half(0);

			critical_point2 = rfixed_trunc(crit_point_ff);

			if (rdev->disp_priority == 2) {
				critical_point2 = 0;
			}

			if (max_stop_req - critical_point2 < 4)
				critical_point2 = 0;

		}

		if (critical_point2 == 0 && rdev->family == CHIP_R300) {
			/* some R300 cards have problem with this set to 0 */
			critical_point2 = 0x10;
		}

		WREG32(RADEON_GRPH2_BUFFER_CNTL, ((grph2_cntl & ~RADEON_GRPH_CRITICAL_POINT_MASK) |
						  (critical_point2 << RADEON_GRPH_CRITICAL_POINT_SHIFT)));

		if ((rdev->family == CHIP_RS400) ||
		    (rdev->family == CHIP_RS480)) {
#if 0
			/* attempt to program RS400 disp2 regs correctly ??? */
			temp = RREG32(RS400_DISP2_REQ_CNTL1);
			temp &= ~(RS400_DISP2_START_REQ_LEVEL_MASK |
				  RS400_DISP2_STOP_REQ_LEVEL_MASK);
			WREG32(RS400_DISP2_REQ_CNTL1, (temp |
						       (critical_point2 << RS400_DISP1_START_REQ_LEVEL_SHIFT) |
						       (critical_point2 << RS400_DISP1_STOP_REQ_LEVEL_SHIFT)));
			temp = RREG32(RS400_DISP2_REQ_CNTL2);
			temp &= ~(RS400_DISP2_CRITICAL_POINT_START_MASK |
				  RS400_DISP2_CRITICAL_POINT_STOP_MASK);
			WREG32(RS400_DISP2_REQ_CNTL2, (temp |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_START_SHIFT) |
						       (critical_point2 << RS400_DISP2_CRITICAL_POINT_STOP_SHIFT)));
#endif
			/* Magic values for RS400/RS480 — presumably taken from
			 * a known-good BIOS setup; origin unverified. */
			WREG32(RS400_DISP2_REQ_CNTL1, 0x105DC1CC);
			WREG32(RS400_DISP2_REQ_CNTL2, 0x2749D000);
			WREG32(RS400_DMIF_MEM_CNTL1, 0x29CA71DC);
			WREG32(RS400_DISP1_REQ_CNTL1, 0x28FBC3AC);
		}

		DRM_DEBUG("GRPH2_BUFFER_CNTL from to %x\n",
			  (unsigned int)RREG32(RADEON_GRPH2_BUFFER_CNTL));
	}
}
/drivers/video/drm/radeon/radeon_legacy_encoders.c |
---|
29,16 → 29,171 |
#include "radeon.h" |
#include "atom.h" |
static void radeon_legacy_encoder_disable(struct drm_encoder *encoder) |
static void radeon_legacy_rmx_mode_set(struct drm_encoder *encoder, |
struct drm_display_mode *mode, |
struct drm_display_mode *adjusted_mode) |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct drm_encoder_helper_funcs *encoder_funcs; |
int xres = mode->hdisplay; |
int yres = mode->vdisplay; |
bool hscale = true, vscale = true; |
int hsync_wid; |
int vsync_wid; |
int hsync_start; |
uint32_t scale, inc; |
uint32_t fp_horz_stretch, fp_vert_stretch, crtc_more_cntl, fp_horz_vert_active; |
uint32_t fp_h_sync_strt_wid, fp_v_sync_strt_wid, fp_crtc_h_total_disp, fp_crtc_v_total_disp; |
struct radeon_native_mode *native_mode = &radeon_encoder->native_mode; |
encoder_funcs = encoder->helper_private; |
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); |
radeon_encoder->active_device = 0; |
DRM_DEBUG("\n"); |
fp_vert_stretch = RREG32(RADEON_FP_VERT_STRETCH) & |
(RADEON_VERT_STRETCH_RESERVED | |
RADEON_VERT_AUTO_RATIO_INC); |
fp_horz_stretch = RREG32(RADEON_FP_HORZ_STRETCH) & |
(RADEON_HORZ_FP_LOOP_STRETCH | |
RADEON_HORZ_AUTO_RATIO_INC); |
crtc_more_cntl = 0; |
if ((rdev->family == CHIP_RS100) || |
(rdev->family == CHIP_RS200)) { |
/* This is to workaround the asic bug for RMX, some versions |
of BIOS dosen't have this register initialized correctly. */ |
crtc_more_cntl |= RADEON_CRTC_H_CUTOFF_ACTIVE_EN; |
} |
fp_crtc_h_total_disp = ((((mode->crtc_htotal / 8) - 1) & 0x3ff) |
| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); |
hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; |
if (!hsync_wid) |
hsync_wid = 1; |
hsync_start = mode->crtc_hsync_start - 8; |
fp_h_sync_strt_wid = ((hsync_start & 0x1fff) |
| ((hsync_wid & 0x3f) << 16) |
| ((mode->flags & DRM_MODE_FLAG_NHSYNC) |
? RADEON_CRTC_H_SYNC_POL |
: 0)); |
fp_crtc_v_total_disp = (((mode->crtc_vtotal - 1) & 0xffff) |
| ((mode->crtc_vdisplay - 1) << 16)); |
vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; |
if (!vsync_wid) |
vsync_wid = 1; |
fp_v_sync_strt_wid = (((mode->crtc_vsync_start - 1) & 0xfff) |
| ((vsync_wid & 0x1f) << 16) |
| ((mode->flags & DRM_MODE_FLAG_NVSYNC) |
? RADEON_CRTC_V_SYNC_POL |
: 0)); |
fp_horz_vert_active = 0; |
if (native_mode->panel_xres == 0 || |
native_mode->panel_yres == 0) { |
hscale = false; |
vscale = false; |
} else { |
if (xres > native_mode->panel_xres) |
xres = native_mode->panel_xres; |
if (yres > native_mode->panel_yres) |
yres = native_mode->panel_yres; |
if (xres == native_mode->panel_xres) |
hscale = false; |
if (yres == native_mode->panel_yres) |
vscale = false; |
} |
if (radeon_encoder->flags & RADEON_USE_RMX) { |
if (radeon_encoder->rmx_type != RMX_CENTER) { |
if (!hscale) |
fp_horz_stretch |= ((xres/8-1) << 16); |
else { |
inc = (fp_horz_stretch & RADEON_HORZ_AUTO_RATIO_INC) ? 1 : 0; |
scale = ((xres + inc) * RADEON_HORZ_STRETCH_RATIO_MAX) |
/ native_mode->panel_xres + 1; |
fp_horz_stretch |= (((scale) & RADEON_HORZ_STRETCH_RATIO_MASK) | |
RADEON_HORZ_STRETCH_BLEND | |
RADEON_HORZ_STRETCH_ENABLE | |
((native_mode->panel_xres/8-1) << 16)); |
} |
if (!vscale) |
fp_vert_stretch |= ((yres-1) << 12); |
else { |
inc = (fp_vert_stretch & RADEON_VERT_AUTO_RATIO_INC) ? 1 : 0; |
scale = ((yres + inc) * RADEON_VERT_STRETCH_RATIO_MAX) |
/ native_mode->panel_yres + 1; |
fp_vert_stretch |= (((scale) & RADEON_VERT_STRETCH_RATIO_MASK) | |
RADEON_VERT_STRETCH_ENABLE | |
RADEON_VERT_STRETCH_BLEND | |
((native_mode->panel_yres-1) << 12)); |
} |
} else if (radeon_encoder->rmx_type == RMX_CENTER) { |
int blank_width; |
fp_horz_stretch |= ((xres/8-1) << 16); |
fp_vert_stretch |= ((yres-1) << 12); |
crtc_more_cntl |= (RADEON_CRTC_AUTO_HORZ_CENTER_EN | |
RADEON_CRTC_AUTO_VERT_CENTER_EN); |
blank_width = (mode->crtc_hblank_end - mode->crtc_hblank_start) / 8; |
if (blank_width > 110) |
blank_width = 110; |
fp_crtc_h_total_disp = (((blank_width) & 0x3ff) |
| ((((mode->crtc_hdisplay / 8) - 1) & 0x1ff) << 16)); |
hsync_wid = (mode->crtc_hsync_end - mode->crtc_hsync_start) / 8; |
if (!hsync_wid) |
hsync_wid = 1; |
fp_h_sync_strt_wid = ((((mode->crtc_hsync_start - mode->crtc_hblank_start) / 8) & 0x1fff) |
| ((hsync_wid & 0x3f) << 16) |
| ((mode->flags & DRM_MODE_FLAG_NHSYNC) |
? RADEON_CRTC_H_SYNC_POL |
: 0)); |
fp_crtc_v_total_disp = (((mode->crtc_vblank_end - mode->crtc_vblank_start) & 0xffff) |
| ((mode->crtc_vdisplay - 1) << 16)); |
vsync_wid = mode->crtc_vsync_end - mode->crtc_vsync_start; |
if (!vsync_wid) |
vsync_wid = 1; |
fp_v_sync_strt_wid = ((((mode->crtc_vsync_start - mode->crtc_vblank_start) & 0xfff) |
| ((vsync_wid & 0x1f) << 16) |
| ((mode->flags & DRM_MODE_FLAG_NVSYNC) |
? RADEON_CRTC_V_SYNC_POL |
: 0))); |
fp_horz_vert_active = (((native_mode->panel_yres) & 0xfff) | |
(((native_mode->panel_xres / 8) & 0x1ff) << 16)); |
} |
} else { |
fp_horz_stretch |= ((xres/8-1) << 16); |
fp_vert_stretch |= ((yres-1) << 12); |
} |
WREG32(RADEON_FP_HORZ_STRETCH, fp_horz_stretch); |
WREG32(RADEON_FP_VERT_STRETCH, fp_vert_stretch); |
WREG32(RADEON_CRTC_MORE_CNTL, crtc_more_cntl); |
WREG32(RADEON_FP_HORZ_VERT_ACTIVE, fp_horz_vert_active); |
WREG32(RADEON_FP_H_SYNC_STRT_WID, fp_h_sync_strt_wid); |
WREG32(RADEON_FP_V_SYNC_STRT_WID, fp_v_sync_strt_wid); |
WREG32(RADEON_FP_CRTC_H_TOTAL_DISP, fp_crtc_h_total_disp); |
WREG32(RADEON_FP_CRTC_V_TOTAL_DISP, fp_crtc_v_total_disp); |
} |
static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode) |
{ |
struct drm_device *dev = encoder->dev; |
107,8 → 262,6 |
else |
radeon_combios_output_lock(encoder, true); |
radeon_legacy_lvds_dpms(encoder, DRM_MODE_DPMS_OFF); |
radeon_encoder_set_active_device(encoder); |
} |
static void radeon_legacy_lvds_commit(struct drm_encoder *encoder) |
134,6 → 287,9 |
DRM_DEBUG("\n"); |
if (radeon_crtc->crtc_id == 0) |
radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); |
lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL); |
lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN; |
162,7 → 318,7 |
if (radeon_crtc->crtc_id == 0) { |
if (ASIC_IS_R300(rdev)) { |
if (radeon_encoder->rmx_type != RMX_OFF) |
if (radeon_encoder->flags & RADEON_USE_RMX) |
lvds_pll_cntl |= R300_LVDS_SRC_SEL_RMX; |
} else |
lvds_gen_cntl &= ~RADEON_LVDS_SEL_CRTC2; |
194,6 → 350,8 |
drm_mode_set_crtcinfo(adjusted_mode, 0); |
radeon_encoder->flags &= ~RADEON_USE_RMX; |
if (radeon_encoder->rmx_type != RMX_OFF) |
radeon_rmx_mode_fixup(encoder, mode, adjusted_mode); |
206,7 → 364,6 |
.prepare = radeon_legacy_lvds_prepare, |
.mode_set = radeon_legacy_lvds_mode_set, |
.commit = radeon_legacy_lvds_commit, |
.disable = radeon_legacy_encoder_disable, |
}; |
272,7 → 429,6 |
else |
radeon_combios_output_lock(encoder, true); |
radeon_legacy_primary_dac_dpms(encoder, DRM_MODE_DPMS_OFF); |
radeon_encoder_set_active_device(encoder); |
} |
static void radeon_legacy_primary_dac_commit(struct drm_encoder *encoder) |
299,6 → 455,9 |
DRM_DEBUG("\n"); |
if (radeon_crtc->crtc_id == 0) |
radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); |
if (radeon_crtc->crtc_id == 0) { |
if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) { |
disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL) & |
415,7 → 574,6 |
.mode_set = radeon_legacy_primary_dac_mode_set, |
.commit = radeon_legacy_primary_dac_commit, |
.detect = radeon_legacy_primary_dac_detect, |
.disable = radeon_legacy_encoder_disable, |
}; |
468,7 → 626,6 |
else |
radeon_combios_output_lock(encoder, true); |
radeon_legacy_tmds_int_dpms(encoder, DRM_MODE_DPMS_OFF); |
radeon_encoder_set_active_device(encoder); |
} |
static void radeon_legacy_tmds_int_commit(struct drm_encoder *encoder) |
496,6 → 653,9 |
DRM_DEBUG("\n"); |
if (radeon_crtc->crtc_id == 0) |
radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); |
tmp = tmds_pll_cntl = RREG32(RADEON_TMDS_PLL_CNTL); |
tmp &= 0xfffff; |
if (rdev->family == CHIP_RV280) { |
551,7 → 711,7 |
if (radeon_crtc->crtc_id == 0) { |
if (ASIC_IS_R300(rdev) || rdev->family == CHIP_R200) { |
fp_gen_cntl &= ~R200_FP_SOURCE_SEL_MASK; |
if (radeon_encoder->rmx_type != RMX_OFF) |
if (radeon_encoder->flags & RADEON_USE_RMX) |
fp_gen_cntl |= R200_FP_SOURCE_SEL_RMX; |
else |
fp_gen_cntl |= R200_FP_SOURCE_SEL_CRTC1; |
581,7 → 741,6 |
.prepare = radeon_legacy_tmds_int_prepare, |
.mode_set = radeon_legacy_tmds_int_mode_set, |
.commit = radeon_legacy_tmds_int_commit, |
.disable = radeon_legacy_encoder_disable, |
}; |
636,7 → 795,6 |
else |
radeon_combios_output_lock(encoder, true); |
radeon_legacy_tmds_ext_dpms(encoder, DRM_MODE_DPMS_OFF); |
radeon_encoder_set_active_device(encoder); |
} |
static void radeon_legacy_tmds_ext_commit(struct drm_encoder *encoder) |
662,6 → 820,9 |
DRM_DEBUG("\n"); |
if (radeon_crtc->crtc_id == 0) |
radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); |
if (rdev->is_atom_bios) { |
radeon_encoder->pixel_clock = adjusted_mode->clock; |
atombios_external_tmds_setup(encoder, ATOM_ENABLE); |
695,7 → 856,7 |
if (radeon_crtc->crtc_id == 0) { |
if ((rdev->family == CHIP_R200) || ASIC_IS_R300(rdev)) { |
fp2_gen_cntl &= ~R200_FP2_SOURCE_SEL_MASK; |
if (radeon_encoder->rmx_type != RMX_OFF) |
if (radeon_encoder->flags & RADEON_USE_RMX) |
fp2_gen_cntl |= R200_FP2_SOURCE_SEL_RMX; |
else |
fp2_gen_cntl |= R200_FP2_SOURCE_SEL_CRTC1; |
723,7 → 884,6 |
.prepare = radeon_legacy_tmds_ext_prepare, |
.mode_set = radeon_legacy_tmds_ext_mode_set, |
.commit = radeon_legacy_tmds_ext_commit, |
.disable = radeon_legacy_encoder_disable, |
}; |
745,21 → 905,17 |
{ |
struct drm_device *dev = encoder->dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
uint32_t fp2_gen_cntl = 0, crtc2_gen_cntl = 0, tv_dac_cntl = 0; |
uint32_t tv_master_cntl = 0; |
bool is_tv; |
/* uint32_t tv_master_cntl = 0; */ |
DRM_DEBUG("\n"); |
is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false; |
if (rdev->family == CHIP_R200) |
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
else { |
if (is_tv) |
tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); |
else |
crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL); |
/* FIXME TV */ |
/* tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL); */ |
tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); |
} |
768,11 → 924,8 |
if (rdev->family == CHIP_R200) { |
fp2_gen_cntl |= (RADEON_FP2_ON | RADEON_FP2_DVO_EN); |
} else { |
if (is_tv) |
tv_master_cntl |= RADEON_TV_ON; |
else |
crtc2_gen_cntl |= RADEON_CRTC2_CRT2_ON; |
/* tv_master_cntl |= RADEON_TV_ON; */ |
if (rdev->family == CHIP_R420 || |
rdev->family == CHIP_R423 || |
rdev->family == CHIP_RV410) |
793,11 → 946,8 |
if (rdev->family == CHIP_R200) |
fp2_gen_cntl &= ~(RADEON_FP2_ON | RADEON_FP2_DVO_EN); |
else { |
if (is_tv) |
tv_master_cntl &= ~RADEON_TV_ON; |
else |
crtc2_gen_cntl &= ~RADEON_CRTC2_CRT2_ON; |
/* tv_master_cntl &= ~RADEON_TV_ON; */ |
if (rdev->family == CHIP_R420 || |
rdev->family == CHIP_R423 || |
rdev->family == CHIP_RV410) |
817,10 → 967,8 |
if (rdev->family == CHIP_R200) { |
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); |
} else { |
if (is_tv) |
WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); |
else |
WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); |
/* WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl); */ |
WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl); |
} |
839,7 → 987,6 |
else |
radeon_combios_output_lock(encoder, true); |
radeon_legacy_tv_dac_dpms(encoder, DRM_MODE_DPMS_OFF); |
radeon_encoder_set_active_device(encoder); |
} |
static void radeon_legacy_tv_dac_commit(struct drm_encoder *encoder) |
862,14 → 1009,13 |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; |
uint32_t tv_dac_cntl, gpiopad_a = 0, dac2_cntl, disp_output_cntl = 0; |
uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0, disp_tv_out_cntl = 0; |
bool is_tv = false; |
uint32_t disp_hw_debug = 0, fp2_gen_cntl = 0; |
DRM_DEBUG("\n"); |
is_tv = radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT ? true : false; |
if (radeon_crtc->crtc_id == 0) |
radeon_legacy_rmx_mode_set(encoder, mode, adjusted_mode); |
if (rdev->family != CHIP_R200) { |
tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL); |
893,7 → 1039,7 |
} |
/* FIXME TV */ |
if (tv_dac) { |
if (radeon_encoder->enc_priv) { |
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; |
tv_dac_cntl |= (RADEON_TV_DAC_NBLANK | |
RADEON_TV_DAC_NHOLD | |
910,52 → 1056,11 |
if (ASIC_IS_R300(rdev)) { |
gpiopad_a = RREG32(RADEON_GPIOPAD_A) | 1; |
disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL); |
} |
if (rdev->family == CHIP_R200 || ASIC_IS_R300(rdev)) |
disp_tv_out_cntl = RREG32(RADEON_DISP_TV_OUT_CNTL); |
} else if (rdev->family == CHIP_R200) |
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
else |
disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG); |
if (rdev->family == CHIP_R200) |
fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); |
if (is_tv) { |
uint32_t dac_cntl; |
dac_cntl = RREG32(RADEON_DAC_CNTL); |
dac_cntl &= ~RADEON_DAC_TVO_EN; |
WREG32(RADEON_DAC_CNTL, dac_cntl); |
if (ASIC_IS_R300(rdev)) |
gpiopad_a = RREG32(RADEON_GPIOPAD_A) & ~1; |
dac2_cntl = RREG32(RADEON_DAC_CNTL2) & ~RADEON_DAC2_DAC2_CLK_SEL; |
if (radeon_crtc->crtc_id == 0) { |
if (ASIC_IS_R300(rdev)) { |
disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; |
disp_output_cntl |= (RADEON_DISP_TVDAC_SOURCE_CRTC | |
RADEON_DISP_TV_SOURCE_CRTC); |
} |
if (rdev->family >= CHIP_R200) { |
disp_tv_out_cntl &= ~RADEON_DISP_TV_PATH_SRC_CRTC2; |
} else { |
disp_hw_debug |= RADEON_CRT2_DISP1_SEL; |
} |
} else { |
if (ASIC_IS_R300(rdev)) { |
disp_output_cntl &= ~RADEON_DISP_TVDAC_SOURCE_MASK; |
disp_output_cntl |= RADEON_DISP_TV_SOURCE_CRTC; |
} |
if (rdev->family >= CHIP_R200) { |
disp_tv_out_cntl |= RADEON_DISP_TV_PATH_SRC_CRTC2; |
} else { |
disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; |
} |
} |
WREG32(RADEON_DAC_CNTL2, dac2_cntl); |
} else { |
dac2_cntl = RREG32(RADEON_DAC_CNTL2) | RADEON_DAC2_DAC2_CLK_SEL; |
if (radeon_crtc->crtc_id == 0) { |
978,25 → 1083,17 |
} else |
disp_hw_debug &= ~RADEON_CRT2_DISP1_SEL; |
} |
WREG32(RADEON_DAC_CNTL2, dac2_cntl); |
} |
if (ASIC_IS_R300(rdev)) { |
WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1); |
WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl); |
} |
if (rdev->family >= CHIP_R200) |
WREG32(RADEON_DISP_TV_OUT_CNTL, disp_tv_out_cntl); |
WREG32(RADEON_DISP_TV_OUT_CNTL, disp_output_cntl); |
} else if (rdev->family == CHIP_R200) |
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); |
else |
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug); |
if (rdev->family == CHIP_R200) |
WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); |
if (is_tv) |
radeon_legacy_tv_mode_set(encoder, mode, adjusted_mode); |
if (rdev->is_atom_bios) |
radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id); |
else |
1004,141 → 1101,6 |
} |
/*
 * Load-detect a TV attached to the TV DAC on R300-class ASICs.
 *
 * Forces a known constant level out of the TV DAC with monitor
 * detection enabled, then samples the green/blue comparator bits to
 * decide whether an S-video or composite load is present.  Every
 * register touched is saved on entry and restored before returning,
 * so the probe leaves the display state unchanged.
 *
 * Returns true if a TV load was detected on either output.
 */
static bool r300_legacy_tv_detect(struct drm_encoder *encoder,
				  struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl;
	uint32_t disp_output_cntl, gpiopad_a, tmp;
	bool found = false;

	/* save regs needed */
	gpiopad_a = RREG32(RADEON_GPIOPAD_A);
	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
	crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
	dac_ext_cntl = RREG32(RADEON_DAC_EXT_CNTL);
	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
	disp_output_cntl = RREG32(RADEON_DISP_OUTPUT_CNTL);

	/* clear bit 0 of GPIOPAD_A for the duration of the probe */
	WREG32_P(RADEON_GPIOPAD_A, 0, ~1);

	WREG32(RADEON_DAC_CNTL2, RADEON_DAC2_DAC2_CLK_SEL);

	/* force CRT2 output on with vsync tristated */
	WREG32(RADEON_CRTC2_GEN_CNTL,
	       RADEON_CRTC2_CRT2_ON | RADEON_CRTC2_VSYNC_TRISTAT);

	/* route the TV DAC source to CRTC2 */
	tmp = disp_output_cntl & ~RADEON_DISP_TVDAC_SOURCE_MASK;
	tmp |= RADEON_DISP_TVDAC_SOURCE_CRTC2;
	WREG32(RADEON_DISP_OUTPUT_CNTL, tmp);

	/* force a fixed RGB level (0xec) out of the DAC, blanking off */
	WREG32(RADEON_DAC_EXT_CNTL,
	       RADEON_DAC2_FORCE_BLANK_OFF_EN |
	       RADEON_DAC2_FORCE_DATA_EN |
	       RADEON_DAC_FORCE_DATA_SEL_RGB |
	       (0xec << RADEON_DAC_FORCE_DATA_SHIFT));

	WREG32(RADEON_TV_DAC_CNTL,
	       RADEON_TV_DAC_STD_NTSC |
	       (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
	       (6 << RADEON_TV_DAC_DACADJ_SHIFT));
	/* read back so the write posts before the settle delay */
	RREG32(RADEON_TV_DAC_CNTL);
	mdelay(4);

	/* enable monitor detection and let the comparators settle */
	WREG32(RADEON_TV_DAC_CNTL,
	       RADEON_TV_DAC_NBLANK |
	       RADEON_TV_DAC_NHOLD |
	       RADEON_TV_MONITOR_DETECT_EN |
	       RADEON_TV_DAC_STD_NTSC |
	       (8 << RADEON_TV_DAC_BGADJ_SHIFT) |
	       (6 << RADEON_TV_DAC_DACADJ_SHIFT));
	RREG32(RADEON_TV_DAC_CNTL);
	mdelay(6);

	/* green detect => S-video load, blue detect => composite load */
	tmp = RREG32(RADEON_TV_DAC_CNTL);
	if ((tmp & RADEON_TV_DAC_GDACDET) != 0) {
		found = true;
		DRM_DEBUG("S-video TV connection detected\n");
	} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
		found = true;
		DRM_DEBUG("Composite TV connection detected\n");
	}

	/* restore the saved state in reverse order */
	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
	WREG32(RADEON_DAC_EXT_CNTL, dac_ext_cntl);
	WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
	WREG32(RADEON_DISP_OUTPUT_CNTL, disp_output_cntl);
	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
	WREG32_P(RADEON_GPIOPAD_A, gpiopad_a, ~1);

	return found;
}
/*
 * Load-detect a TV attached to the TV DAC on pre-R300 ASICs.
 * R300-class parts are delegated to r300_legacy_tv_detect().
 *
 * Same scheme as the R300 variant: save the registers we clobber,
 * force a constant level out of the DAC with monitor detection
 * enabled, sample the green/blue comparator bits, then restore.
 *
 * Returns true if a TV load was detected on either output.
 */
static bool radeon_legacy_tv_detect(struct drm_encoder *encoder,
				    struct drm_connector *connector)
{
	struct drm_device *dev = encoder->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tv_dac_cntl, dac_cntl2;
	uint32_t config_cntl, tv_pre_dac_mux_cntl, tv_master_cntl, tmp;
	bool found = false;

	if (ASIC_IS_R300(rdev))
		return r300_legacy_tv_detect(encoder, connector);

	/* save regs needed */
	dac_cntl2 = RREG32(RADEON_DAC_CNTL2);
	tv_master_cntl = RREG32(RADEON_TV_MASTER_CNTL);
	tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
	config_cntl = RREG32(RADEON_CONFIG_CNTL);
	tv_pre_dac_mux_cntl = RREG32(RADEON_TV_PRE_DAC_MUX_CNTL);

	tmp = dac_cntl2 & ~RADEON_DAC2_DAC2_CLK_SEL;
	WREG32(RADEON_DAC_CNTL2, tmp);

	/* turn the TV encoder on with FIFOs disabled and resets asserted */
	tmp = tv_master_cntl | RADEON_TV_ON;
	tmp &= ~(RADEON_TV_ASYNC_RST |
		 RADEON_RESTART_PHASE_FIX |
		 RADEON_CRT_FIFO_CE_EN |
		 RADEON_TV_FIFO_CE_EN |
		 RADEON_RE_SYNC_NOW_SEL_MASK);
	tmp |= RADEON_TV_FIFO_ASYNC_RST | RADEON_CRT_ASYNC_RST;
	WREG32(RADEON_TV_MASTER_CNTL, tmp);

	tmp = RADEON_TV_DAC_NBLANK | RADEON_TV_DAC_NHOLD |
		RADEON_TV_MONITOR_DETECT_EN | RADEON_TV_DAC_STD_NTSC |
		(8 << RADEON_TV_DAC_BGADJ_SHIFT);

	/* DAC adjust value depends on the ASIC revision in CONFIG_CNTL */
	if (config_cntl & RADEON_CFG_ATI_REV_ID_MASK)
		tmp |= (4 << RADEON_TV_DAC_DACADJ_SHIFT);
	else
		tmp |= (8 << RADEON_TV_DAC_DACADJ_SHIFT);
	WREG32(RADEON_TV_DAC_CNTL, tmp);

	/* force a fixed level (0x109) onto all three DAC channels */
	tmp = RADEON_C_GRN_EN | RADEON_CMP_BLU_EN |
		RADEON_RED_MX_FORCE_DAC_DATA |
		RADEON_GRN_MX_FORCE_DAC_DATA |
		RADEON_BLU_MX_FORCE_DAC_DATA |
		(0x109 << RADEON_TV_FORCE_DAC_DATA_SHIFT);
	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tmp);

	mdelay(3);

	/* green detect => S-video load, blue detect => composite load */
	tmp = RREG32(RADEON_TV_DAC_CNTL);
	if (tmp & RADEON_TV_DAC_GDACDET) {
		found = true;
		DRM_DEBUG("S-video TV connection detected\n");
	} else if ((tmp & RADEON_TV_DAC_BDACDET) != 0) {
		found = true;
		DRM_DEBUG("Composite TV connection detected\n");
	}

	/* restore the saved register state */
	WREG32(RADEON_TV_PRE_DAC_MUX_CNTL, tv_pre_dac_mux_cntl);
	WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
	WREG32(RADEON_TV_MASTER_CNTL, tv_master_cntl);
	WREG32(RADEON_DAC_CNTL2, dac_cntl2);
	return found;
}
static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder *encoder, |
struct drm_connector *connector) |
{ |
1147,30 → 1109,10 |
uint32_t crtc2_gen_cntl, tv_dac_cntl, dac_cntl2, dac_ext_cntl; |
uint32_t disp_hw_debug, disp_output_cntl, gpiopad_a, pixclks_cntl, tmp; |
enum drm_connector_status found = connector_status_disconnected; |
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
struct radeon_encoder_tv_dac *tv_dac = radeon_encoder->enc_priv; |
bool color = true; |
if (connector->connector_type == DRM_MODE_CONNECTOR_SVIDEO || |
connector->connector_type == DRM_MODE_CONNECTOR_Composite || |
connector->connector_type == DRM_MODE_CONNECTOR_9PinDIN) { |
bool tv_detect; |
/* FIXME tv */ |
if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_TV_SUPPORT)) |
return connector_status_disconnected; |
tv_detect = radeon_legacy_tv_detect(encoder, connector); |
if (tv_detect && tv_dac) |
found = connector_status_connected; |
return found; |
} |
/* don't probe if the encoder is being used for something else not CRT related */ |
if (radeon_encoder->active_device && !(radeon_encoder->active_device & ATOM_DEVICE_CRT_SUPPORT)) { |
DRM_INFO("not detecting due to %08x\n", radeon_encoder->active_device); |
return connector_status_disconnected; |
} |
/* save the regs we need */ |
pixclks_cntl = RREG32_PLL(RADEON_PIXCLKS_CNTL); |
gpiopad_a = ASIC_IS_R300(rdev) ? RREG32(RADEON_GPIOPAD_A) : 0; |
1252,7 → 1194,8 |
} |
WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl); |
return found; |
/* return found; */ |
return connector_status_disconnected; |
} |
1263,7 → 1206,6 |
.mode_set = radeon_legacy_tv_dac_mode_set, |
.commit = radeon_legacy_tv_dac_commit, |
.detect = radeon_legacy_tv_dac_detect, |
.disable = radeon_legacy_encoder_disable, |
}; |
1271,30 → 1213,6 |
.destroy = radeon_enc_destroy, |
}; |
static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon_encoder *encoder) |
{ |
struct drm_device *dev = encoder->base.dev; |
struct radeon_device *rdev = dev->dev_private; |
struct radeon_encoder_int_tmds *tmds = NULL; |
bool ret; |
tmds = kzalloc(sizeof(struct radeon_encoder_int_tmds), GFP_KERNEL); |
if (!tmds) |
return NULL; |
if (rdev->is_atom_bios) |
ret = radeon_atombios_get_tmds_info(encoder, tmds); |
else |
ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); |
if (ret == false) |
radeon_legacy_get_tmds_info_from_table(encoder, tmds); |
return tmds; |
} |
void |
radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device) |
{ |
1325,11 → 1243,9 |
radeon_encoder->encoder_id = encoder_id; |
radeon_encoder->devices = supported_device; |
radeon_encoder->rmx_type = RMX_OFF; |
switch (radeon_encoder->encoder_id) { |
case ENCODER_OBJECT_ID_INTERNAL_LVDS: |
encoder->possible_crtcs = 0x1; |
drm_encoder_init(dev, encoder, &radeon_legacy_lvds_enc_funcs, DRM_MODE_ENCODER_LVDS); |
drm_encoder_helper_add(encoder, &radeon_legacy_lvds_helper_funcs); |
if (rdev->is_atom_bios) |
1341,7 → 1257,10 |
case ENCODER_OBJECT_ID_INTERNAL_TMDS1: |
drm_encoder_init(dev, encoder, &radeon_legacy_tmds_int_enc_funcs, DRM_MODE_ENCODER_TMDS); |
drm_encoder_helper_add(encoder, &radeon_legacy_tmds_int_helper_funcs); |
radeon_encoder->enc_priv = radeon_legacy_get_tmds_info(radeon_encoder); |
if (rdev->is_atom_bios) |
radeon_encoder->enc_priv = radeon_atombios_get_tmds_info(radeon_encoder); |
else |
radeon_encoder->enc_priv = radeon_combios_get_tmds_info(radeon_encoder); |
break; |
case ENCODER_OBJECT_ID_INTERNAL_DAC1: |
drm_encoder_init(dev, encoder, &radeon_legacy_primary_dac_enc_funcs, DRM_MODE_ENCODER_DAC); |
/drivers/video/drm/radeon/atom.c |
---|
22,9 → 22,12 |
* Author: Stanislaw Skowronek |
*/ |
#include <linux/module.h> |
#include <linux/sched.h> |
//#include <linux/module.h> |
//#include <linux/sched.h> |
#include <types.h> |
#include <syscall.h> |
#define ATOM_DEBUG |
#include "atom.h" |
1161,6 → 1164,9 |
int atom_asic_init(struct atom_context *ctx) |
{ |
dbgprintf("%s\n",__FUNCTION__); |
int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); |
uint32_t ps[16]; |
memset(ps, 0, 64); |
/drivers/video/drm/radeon/atombios.h |
---|
2374,17 → 2374,6 |
ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING]; |
} ATOM_ANALOG_TV_INFO; |
#define MAX_SUPPORTED_TV_TIMING_V1_2 3

/* Analog TV info table, version 1.2: the supported/boot-up TV
 * standards, an optional external TV-encoder ASIC id and its I2C
 * slave address, followed by the mode timing entries.
 * NOTE(review): aModeTimings is dimensioned with
 * MAX_SUPPORTED_TV_TIMING rather than MAX_SUPPORTED_TV_TIMING_V1_2 —
 * confirm against the upstream atombios.h before changing. */
typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
	ATOM_COMMON_TABLE_HEADER sHeader;
	UCHAR ucTV_SupportedStandard;
	UCHAR ucTV_BootUpDefaultStandard;
	UCHAR ucExt_TV_ASIC_ID;
	UCHAR ucExt_TV_ASIC_SlaveAddr;
	ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
} ATOM_ANALOG_TV_INFO_V1_2;
/**************************************************************************/ |
/* VRAM usage and their defintions */ |
/drivers/video/drm/radeon/r300_reg.h |
---|
27,11 → 27,9 |
#ifndef _R300_REG_H_ |
#define _R300_REG_H_ |
#define R300_SURF_TILE_MACRO (1<<16) |
#define R300_SURF_TILE_MICRO (2<<16) |
#define R300_SURF_TILE_BOTH (3<<16) |
#define R300_MC_INIT_MISC_LAT_TIMER 0x180 |
# define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT 0 |
# define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT 4 |
/drivers/video/drm/radeon/r500_reg.h |
---|
350,7 → 350,6 |
#define AVIVO_D1CRTC_BLANK_CONTROL 0x6084 |
#define AVIVO_D1CRTC_INTERLACE_CONTROL 0x6088 |
#define AVIVO_D1CRTC_INTERLACE_STATUS 0x608c |
#define AVIVO_D1CRTC_FRAME_COUNT 0x60a4 |
#define AVIVO_D1CRTC_STEREO_CONTROL 0x60c4 |
/* master controls */ |
439,15 → 438,13 |
# define AVIVO_DC_LB_DISP1_END_ADR_SHIFT 4 |
# define AVIVO_DC_LB_DISP1_END_ADR_MASK 0x7ff |
#define R500_DxMODE_INT_MASK 0x6540 |
#define R500_D1MODE_INT_MASK (1<<0) |
#define R500_D2MODE_INT_MASK (1<<8) |
#define AVIVO_D1MODE_DATA_FORMAT 0x6528 |
# define AVIVO_D1MODE_INTERLEAVE_EN (1 << 0) |
#define AVIVO_D1MODE_DESKTOP_HEIGHT 0x652C |
#define AVIVO_D1MODE_VBLANK_STATUS 0x6534 |
# define AVIVO_VBLANK_ACK (1 << 4) |
#define AVIVO_D1MODE_VLINE_START_END 0x6538 |
#define AVIVO_DxMODE_INT_MASK 0x6540 |
# define AVIVO_D1MODE_INT_MASK (1 << 0) |
# define AVIVO_D2MODE_INT_MASK (1 << 8) |
#define AVIVO_D1MODE_VIEWPORT_START 0x6580 |
#define AVIVO_D1MODE_VIEWPORT_SIZE 0x6584 |
#define AVIVO_D1MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6588 |
477,7 → 474,6 |
#define AVIVO_D2CRTC_BLANK_CONTROL 0x6884 |
#define AVIVO_D2CRTC_INTERLACE_CONTROL 0x6888 |
#define AVIVO_D2CRTC_INTERLACE_STATUS 0x688c |
#define AVIVO_D2CRTC_FRAME_COUNT 0x68a4 |
#define AVIVO_D2CRTC_STEREO_CONTROL 0x68c4 |
#define AVIVO_D2GRPH_ENABLE 0x6900 |
500,8 → 496,6 |
#define AVIVO_D2CUR_SIZE 0x6c10 |
#define AVIVO_D2CUR_POSITION 0x6c14 |
#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 |
#define AVIVO_D2MODE_VLINE_START_END 0x6d38 |
#define AVIVO_D2MODE_VIEWPORT_START 0x6d80 |
#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 |
#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 |
752,8 → 746,4 |
# define AVIVO_I2C_EN (1 << 0) |
# define AVIVO_I2C_RESET (1 << 8) |
#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc |
# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4) |
# define AVIVO_D2_VBLANK_INTERRUPT (1 << 5) |
#endif |
/drivers/video/drm/radeon/radeon_drm.h |
---|
0,0 → 1,884 |
/* radeon_drm.h -- Public header for the radeon driver -*- linux-c -*- |
* |
* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. |
* Copyright 2000 VA Linux Systems, Inc., Fremont, California. |
* Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. |
* All rights reserved. |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
* to deal in the Software without restriction, including without limitation |
* the rights to use, copy, modify, merge, publish, distribute, sublicense, |
* and/or sell copies of the Software, and to permit persons to whom the |
* Software is furnished to do so, subject to the following conditions: |
* |
* The above copyright notice and this permission notice (including the next |
* paragraph) shall be included in all copies or substantial portions of the |
* Software. |
* |
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
* DEALINGS IN THE SOFTWARE. |
* |
* Authors: |
* Kevin E. Martin <martin@valinux.com> |
* Gareth Hughes <gareth@valinux.com> |
* Keith Whitwell <keith@tungstengraphics.com> |
*/ |
#ifndef __RADEON_DRM_H__ |
#define __RADEON_DRM_H__ |
#include "types.h" |
/* WARNING: If you change any of these defines, make sure to change the |
* defines in the X server file (radeon_sarea.h) |
*/ |
#ifndef __RADEON_SAREA_DEFINES__ |
#define __RADEON_SAREA_DEFINES__ |
/* Old style state flags, required for sarea interface (1.1 and 1.2 |
* clears) and 1.2 drm_vertex2 ioctl. |
*/ |
#define RADEON_UPLOAD_CONTEXT 0x00000001 |
#define RADEON_UPLOAD_VERTFMT 0x00000002 |
#define RADEON_UPLOAD_LINE 0x00000004 |
#define RADEON_UPLOAD_BUMPMAP 0x00000008 |
#define RADEON_UPLOAD_MASKS 0x00000010 |
#define RADEON_UPLOAD_VIEWPORT 0x00000020 |
#define RADEON_UPLOAD_SETUP 0x00000040 |
#define RADEON_UPLOAD_TCL 0x00000080 |
#define RADEON_UPLOAD_MISC 0x00000100 |
#define RADEON_UPLOAD_TEX0 0x00000200 |
#define RADEON_UPLOAD_TEX1 0x00000400 |
#define RADEON_UPLOAD_TEX2 0x00000800 |
#define RADEON_UPLOAD_TEX0IMAGES 0x00001000 |
#define RADEON_UPLOAD_TEX1IMAGES 0x00002000 |
#define RADEON_UPLOAD_TEX2IMAGES 0x00004000 |
#define RADEON_UPLOAD_CLIPRECTS 0x00008000 /* handled client-side */ |
#define RADEON_REQUIRE_QUIESCENCE 0x00010000 |
#define RADEON_UPLOAD_ZBIAS 0x00020000 /* version 1.2 and newer */ |
#define RADEON_UPLOAD_ALL 0x003effff |
#define RADEON_UPLOAD_CONTEXT_ALL 0x003e01ff |
/* New style per-packet identifiers for use in cmd_buffer ioctl with |
* the RADEON_EMIT_PACKET command. Comments relate new packets to old |
* state bits and the packet size: |
*/ |
#define RADEON_EMIT_PP_MISC 0 /* context/7 */ |
#define RADEON_EMIT_PP_CNTL 1 /* context/3 */ |
#define RADEON_EMIT_RB3D_COLORPITCH 2 /* context/1 */ |
#define RADEON_EMIT_RE_LINE_PATTERN 3 /* line/2 */ |
#define RADEON_EMIT_SE_LINE_WIDTH 4 /* line/1 */ |
#define RADEON_EMIT_PP_LUM_MATRIX 5 /* bumpmap/1 */ |
#define RADEON_EMIT_PP_ROT_MATRIX_0 6 /* bumpmap/2 */ |
#define RADEON_EMIT_RB3D_STENCILREFMASK 7 /* masks/3 */ |
#define RADEON_EMIT_SE_VPORT_XSCALE 8 /* viewport/6 */ |
#define RADEON_EMIT_SE_CNTL 9 /* setup/2 */ |
#define RADEON_EMIT_SE_CNTL_STATUS 10 /* setup/1 */ |
#define RADEON_EMIT_RE_MISC 11 /* misc/1 */ |
#define RADEON_EMIT_PP_TXFILTER_0 12 /* tex0/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_0 13 /* tex0/1 */ |
#define RADEON_EMIT_PP_TXFILTER_1 14 /* tex1/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_1 15 /* tex1/1 */ |
#define RADEON_EMIT_PP_TXFILTER_2 16 /* tex2/6 */ |
#define RADEON_EMIT_PP_BORDER_COLOR_2 17 /* tex2/1 */ |
#define RADEON_EMIT_SE_ZBIAS_FACTOR 18 /* zbias/2 */ |
#define RADEON_EMIT_SE_TCL_OUTPUT_VTX_FMT 19 /* tcl/11 */ |
#define RADEON_EMIT_SE_TCL_MATERIAL_EMMISSIVE_RED 20 /* material/17 */ |
#define R200_EMIT_PP_TXCBLEND_0 21 /* tex0/4 */ |
#define R200_EMIT_PP_TXCBLEND_1 22 /* tex1/4 */ |
#define R200_EMIT_PP_TXCBLEND_2 23 /* tex2/4 */ |
#define R200_EMIT_PP_TXCBLEND_3 24 /* tex3/4 */ |
#define R200_EMIT_PP_TXCBLEND_4 25 /* tex4/4 */ |
#define R200_EMIT_PP_TXCBLEND_5 26 /* tex5/4 */ |
#define R200_EMIT_PP_TXCBLEND_6 27 /* /4 */ |
#define R200_EMIT_PP_TXCBLEND_7 28 /* /4 */ |
#define R200_EMIT_TCL_LIGHT_MODEL_CTL_0 29 /* tcl/7 */ |
#define R200_EMIT_TFACTOR_0 30 /* tf/7 */ |
#define R200_EMIT_VTX_FMT_0 31 /* vtx/5 */ |
#define R200_EMIT_VAP_CTL 32 /* vap/1 */ |
#define R200_EMIT_MATRIX_SELECT_0 33 /* msl/5 */ |
#define R200_EMIT_TEX_PROC_CTL_2 34 /* tcg/5 */ |
#define R200_EMIT_TCL_UCP_VERT_BLEND_CTL 35 /* tcl/1 */ |
#define R200_EMIT_PP_TXFILTER_0 36 /* tex0/6 */ |
#define R200_EMIT_PP_TXFILTER_1 37 /* tex1/6 */ |
#define R200_EMIT_PP_TXFILTER_2 38 /* tex2/6 */ |
#define R200_EMIT_PP_TXFILTER_3 39 /* tex3/6 */ |
#define R200_EMIT_PP_TXFILTER_4 40 /* tex4/6 */ |
#define R200_EMIT_PP_TXFILTER_5 41 /* tex5/6 */ |
#define R200_EMIT_PP_TXOFFSET_0 42 /* tex0/1 */ |
#define R200_EMIT_PP_TXOFFSET_1 43 /* tex1/1 */ |
#define R200_EMIT_PP_TXOFFSET_2 44 /* tex2/1 */ |
#define R200_EMIT_PP_TXOFFSET_3 45 /* tex3/1 */ |
#define R200_EMIT_PP_TXOFFSET_4 46 /* tex4/1 */ |
#define R200_EMIT_PP_TXOFFSET_5 47 /* tex5/1 */ |
#define R200_EMIT_VTE_CNTL 48 /* vte/1 */ |
#define R200_EMIT_OUTPUT_VTX_COMP_SEL 49 /* vtx/1 */ |
#define R200_EMIT_PP_TAM_DEBUG3 50 /* tam/1 */ |
#define R200_EMIT_PP_CNTL_X 51 /* cst/1 */ |
#define R200_EMIT_RB3D_DEPTHXY_OFFSET 52 /* cst/1 */ |
#define R200_EMIT_RE_AUX_SCISSOR_CNTL 53 /* cst/1 */ |
#define R200_EMIT_RE_SCISSOR_TL_0 54 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_1 55 /* cst/2 */ |
#define R200_EMIT_RE_SCISSOR_TL_2 56 /* cst/2 */ |
#define R200_EMIT_SE_VAP_CNTL_STATUS 57 /* cst/1 */ |
#define R200_EMIT_SE_VTX_STATE_CNTL 58 /* cst/1 */ |
#define R200_EMIT_RE_POINTSIZE 59 /* cst/1 */ |
#define R200_EMIT_TCL_INPUT_VTX_VECTOR_ADDR_0 60 /* cst/4 */ |
#define R200_EMIT_PP_CUBIC_FACES_0 61 |
#define R200_EMIT_PP_CUBIC_OFFSETS_0 62 |
#define R200_EMIT_PP_CUBIC_FACES_1 63 |
#define R200_EMIT_PP_CUBIC_OFFSETS_1 64 |
#define R200_EMIT_PP_CUBIC_FACES_2 65 |
#define R200_EMIT_PP_CUBIC_OFFSETS_2 66 |
#define R200_EMIT_PP_CUBIC_FACES_3 67 |
#define R200_EMIT_PP_CUBIC_OFFSETS_3 68 |
#define R200_EMIT_PP_CUBIC_FACES_4 69 |
#define R200_EMIT_PP_CUBIC_OFFSETS_4 70 |
#define R200_EMIT_PP_CUBIC_FACES_5 71 |
#define R200_EMIT_PP_CUBIC_OFFSETS_5 72 |
#define RADEON_EMIT_PP_TEX_SIZE_0 73 |
#define RADEON_EMIT_PP_TEX_SIZE_1 74 |
#define RADEON_EMIT_PP_TEX_SIZE_2 75 |
#define R200_EMIT_RB3D_BLENDCOLOR 76 |
#define R200_EMIT_TCL_POINT_SPRITE_CNTL 77 |
#define RADEON_EMIT_PP_CUBIC_FACES_0 78 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T0 79 |
#define RADEON_EMIT_PP_CUBIC_FACES_1 80 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T1 81 |
#define RADEON_EMIT_PP_CUBIC_FACES_2 82 |
#define RADEON_EMIT_PP_CUBIC_OFFSETS_T2 83 |
#define R200_EMIT_PP_TRI_PERF_CNTL 84 |
#define R200_EMIT_PP_AFS_0 85 |
#define R200_EMIT_PP_AFS_1 86 |
#define R200_EMIT_ATF_TFACTOR 87 |
#define R200_EMIT_PP_TXCTLALL_0 88 |
#define R200_EMIT_PP_TXCTLALL_1 89 |
#define R200_EMIT_PP_TXCTLALL_2 90 |
#define R200_EMIT_PP_TXCTLALL_3 91 |
#define R200_EMIT_PP_TXCTLALL_4 92 |
#define R200_EMIT_PP_TXCTLALL_5 93 |
#define R200_EMIT_VAP_PVS_CNTL 94 |
#define RADEON_MAX_STATE_PACKETS 95 |
/* Commands understood by cmd_buffer ioctl. More can be added but |
* obviously these can't be removed or changed: |
*/ |
#define RADEON_CMD_PACKET 1 /* emit one of the register packets above */ |
#define RADEON_CMD_SCALARS 2 /* emit scalar data */ |
#define RADEON_CMD_VECTORS 3 /* emit vector data */ |
#define RADEON_CMD_DMA_DISCARD 4 /* discard current dma buf */ |
#define RADEON_CMD_PACKET3 5 /* emit hw packet */ |
#define RADEON_CMD_PACKET3_CLIP 6 /* emit hw packet wrapped in cliprects */ |
#define RADEON_CMD_SCALARS2 7 /* r200 stopgap */ |
#define RADEON_CMD_WAIT 8 /* emit hw wait commands -- note: |
* doesn't make the cpu wait, just |
* the graphics hardware */ |
#define RADEON_CMD_VECLINEAR 9 /* another r200 stopgap */ |
/* Header word of a command in the radeon cmd_buffer ioctl stream.
 * The first byte (cmd_type) selects one of the RADEON_CMD_* commands
 * above and thereby which view of the remaining three bytes applies.
 * This is userspace ABI: layout must not change. */
typedef union {
	int i;
	struct {
		unsigned char cmd_type, pad0, pad1, pad2;
	} header;
	/* RADEON_CMD_PACKET: packet_id indexes RADEON_EMIT_* / R200_EMIT_* */
	struct {
		unsigned char cmd_type, packet_id, pad0, pad1;
	} packet;
	/* RADEON_CMD_SCALARS / SCALARS2: scalar register upload */
	struct {
		unsigned char cmd_type, offset, stride, count;
	} scalars;
	/* RADEON_CMD_VECTORS: vector register upload */
	struct {
		unsigned char cmd_type, offset, stride, count;
	} vectors;
	/* RADEON_CMD_VECLINEAR: r200 linear vector upload (16-bit address) */
	struct {
		unsigned char cmd_type, addr_lo, addr_hi, count;
	} veclinear;
	/* RADEON_CMD_DMA_DISCARD: release the dma buffer at buf_idx */
	struct {
		unsigned char cmd_type, buf_idx, pad0, pad1;
	} dma;
	/* RADEON_CMD_WAIT: flags are RADEON_WAIT_2D / RADEON_WAIT_3D */
	struct {
		unsigned char cmd_type, flags, pad0, pad1;
	} wait;
} drm_radeon_cmd_header_t;
#define RADEON_WAIT_2D 0x1 |
#define RADEON_WAIT_3D 0x2 |
/* Allowed parameters for R300_CMD_PACKET3 |
*/ |
#define R300_CMD_PACKET3_CLEAR 0 |
#define R300_CMD_PACKET3_RAW 1 |
/* Commands understood by cmd_buffer ioctl for R300. |
* The interface has not been stabilized, so some of these may be removed |
* and eventually reordered before stabilization. |
*/ |
#define R300_CMD_PACKET0 1 |
#define R300_CMD_VPU 2 /* emit vertex program upload */ |
#define R300_CMD_PACKET3 3 /* emit a packet3 */ |
#define R300_CMD_END3D 4 /* emit sequence ending 3d rendering */ |
#define R300_CMD_CP_DELAY 5 |
#define R300_CMD_DMA_DISCARD 6 |
#define R300_CMD_WAIT 7 |
# define R300_WAIT_2D 0x1 |
# define R300_WAIT_3D 0x2 |
/* these two defines are DOING IT WRONG - however |
* we have userspace which relies on using these. |
* The wait interface is backwards compat new |
* code should use the NEW_WAIT defines below |
* THESE ARE NOT BIT FIELDS |
*/ |
# define R300_WAIT_2D_CLEAN 0x3 |
# define R300_WAIT_3D_CLEAN 0x4 |
# define R300_NEW_WAIT_2D_3D 0x3 |
# define R300_NEW_WAIT_2D_2D_CLEAN 0x4 |
# define R300_NEW_WAIT_3D_3D_CLEAN 0x6 |
# define R300_NEW_WAIT_2D_2D_CLEAN_3D_3D_CLEAN 0x8 |
#define R300_CMD_SCRATCH 8 |
#define R300_CMD_R500FP 9 |
/* Header word of a command in the R300 cmd_buffer stream.  cmd_type
 * selects one of the R300_CMD_* commands above and which view of the
 * remaining bytes applies.  Userspace ABI: layout must not change. */
typedef union {
	unsigned int u;
	struct {
		unsigned char cmd_type, pad0, pad1, pad2;
	} header;
	/* R300_CMD_PACKET0: count registers starting at (reghi<<8 | reglo) */
	struct {
		unsigned char cmd_type, count, reglo, reghi;
	} packet0;
	/* R300_CMD_VPU: vertex program upload */
	struct {
		unsigned char cmd_type, count, adrlo, adrhi;
	} vpu;
	/* R300_CMD_PACKET3: packet is R300_CMD_PACKET3_CLEAR or _RAW */
	struct {
		unsigned char cmd_type, packet, pad0, pad1;
	} packet3;
	struct {
		unsigned char cmd_type, packet;
		unsigned short count; /* amount of packet2 to emit */
	} delay;
	/* R300_CMD_DMA_DISCARD: release the dma buffer at buf_idx */
	struct {
		unsigned char cmd_type, buf_idx, pad0, pad1;
	} dma;
	/* R300_CMD_WAIT: flags are the R300_WAIT_* / R300_NEW_WAIT_* codes */
	struct {
		unsigned char cmd_type, flags, pad0, pad1;
	} wait;
	/* R300_CMD_SCRATCH: scratch register writeback setup */
	struct {
		unsigned char cmd_type, reg, n_bufs, flags;
	} scratch;
	/* R300_CMD_R500FP: r500 fragment program constant upload */
	struct {
		unsigned char cmd_type, count, adrlo, adrhi_flags;
	} r500fp;
} drm_r300_cmd_header_t;
/* Buffer-selection and option bits for drm_radeon_clear_t.flags */
#define RADEON_FRONT 0x1
#define RADEON_BACK 0x2
#define RADEON_DEPTH 0x4
#define RADEON_STENCIL 0x8
#define RADEON_CLEAR_FASTZ 0x80000000
#define RADEON_USE_HIERZ 0x40000000
#define RADEON_USE_COMP_ZBUF 0x20000000
/* Flag bits carried in the r500fp command header (adrhi_flags) */
#define R500FP_CONSTANT_TYPE (1 << 1)
#define R500FP_CONSTANT_CLAMP (1 << 2)
/* Primitive types
 */
#define RADEON_POINTS 0x1
#define RADEON_LINES 0x2
#define RADEON_LINE_STRIP 0x3
#define RADEON_TRIANGLES 0x4
#define RADEON_TRIANGLE_FAN 0x5
#define RADEON_TRIANGLE_STRIP 0x6
/* Vertex/indirect buffer size
 */
#define RADEON_BUFFER_SIZE 65536
/* Byte offsets for indirect buffer data
 */
#define RADEON_INDEX_PRIM_OFFSET 20
#define RADEON_SCRATCH_REG_OFFSET 32
#define R600_SCRATCH_REG_OFFSET 256
#define RADEON_NR_SAREA_CLIPRECTS 12
/* There are 2 heaps (local/GART). Each region within a heap is a
 * minimum of 64k, and there are at most 64 of them per heap.
 */
#define RADEON_LOCAL_TEX_HEAP 0
#define RADEON_GART_TEX_HEAP 1
#define RADEON_NR_TEX_HEAPS 2
#define RADEON_NR_TEX_REGIONS 64
#define RADEON_LOG_TEX_GRANULARITY 16
#define RADEON_MAX_TEXTURE_LEVELS 12
#define RADEON_MAX_TEXTURE_UNITS 3
#define RADEON_MAX_SURFACES 8
/* Blits have strict offset rules. All blit offset must be aligned on
 * a 1K-byte boundary.
 */
#define RADEON_OFFSET_SHIFT 10
#define RADEON_OFFSET_ALIGN (1 << RADEON_OFFSET_SHIFT)
#define RADEON_OFFSET_MASK (RADEON_OFFSET_ALIGN - 1)
#endif /* __RADEON_SAREA_DEFINES__ */
/* One RGBA color value, one 32-bit word per channel. */
typedef struct {
unsigned int red;
unsigned int green;
unsigned int blue;
unsigned int alpha;
} radeon_color_regs_t;
/* Shadow copy of the 3d context register block.  Embedded in
 * drm_radeon_sarea_t (context_state) and drm_radeon_state_t, so the field
 * order and sizes are userspace ABI.  The hex comments give the hardware
 * register offset each group starts at.
 */
typedef struct {
/* Context state */
unsigned int pp_misc; /* 0x1c14 */
unsigned int pp_fog_color;
unsigned int re_solid_color;
unsigned int rb3d_blendcntl;
unsigned int rb3d_depthoffset;
unsigned int rb3d_depthpitch;
unsigned int rb3d_zstencilcntl;
unsigned int pp_cntl; /* 0x1c38 */
unsigned int rb3d_cntl;
unsigned int rb3d_coloroffset;
unsigned int re_width_height;
unsigned int rb3d_colorpitch;
unsigned int se_cntl;
/* Vertex format state */
unsigned int se_coord_fmt; /* 0x1c50 */
/* Line state */
unsigned int re_line_pattern; /* 0x1cd0 */
unsigned int re_line_state;
unsigned int se_line_width; /* 0x1db8 */
/* Bumpmap state */
unsigned int pp_lum_matrix; /* 0x1d00 */
unsigned int pp_rot_matrix_0; /* 0x1d58 */
unsigned int pp_rot_matrix_1;
/* Mask state */
unsigned int rb3d_stencilrefmask; /* 0x1d7c */
unsigned int rb3d_ropcntl;
unsigned int rb3d_planemask;
/* Viewport state */
unsigned int se_vport_xscale; /* 0x1d98 */
unsigned int se_vport_xoffset;
unsigned int se_vport_yscale;
unsigned int se_vport_yoffset;
unsigned int se_vport_zscale;
unsigned int se_vport_zoffset;
/* Setup state */
unsigned int se_cntl_status; /* 0x2140 */
/* Misc state */
unsigned int re_top_left; /* 0x26c0 */
unsigned int re_misc;
} drm_radeon_context_regs_t;
/* Second context register group, kept separate from
 * drm_radeon_context_regs_t for ABI compatibility.
 */
typedef struct {
/* Zbias state */
unsigned int se_zbias_factor; /* 0x1dac */
unsigned int se_zbias_constant;
} drm_radeon_context2_regs_t;
/* Setup registers for each texture unit
 */
typedef struct {
unsigned int pp_txfilter;
unsigned int pp_txformat;
unsigned int pp_txoffset;
unsigned int pp_txcblend;
unsigned int pp_txablend;
unsigned int pp_tfactor;
unsigned int pp_border_color;
} drm_radeon_texture_regs_t;
/* One primitive within a vertex buffer, referenced from the v1.2
 * drm_radeon_vertex2_t ioctl.  Bitfields pack prim/stateidx/numverts into a
 * single 32-bit word (ABI layout).
 */
typedef struct {
unsigned int start; /* first vertex/byte used within the buffer */
unsigned int finish;
unsigned int prim:8; /* RADEON_POINTS..RADEON_TRIANGLE_STRIP */
unsigned int stateidx:8; /* index into the ioctl's state array */
unsigned int numverts:16; /* overloaded as offset/64 for elt prims */
unsigned int vc_format; /* vertex format */
} drm_radeon_prim_t;
/* Full hardware state snapshot passed by the v1.2 vertex2 interface:
 * context registers, one register group per texture unit, plus a dirty
 * bitmask saying which parts need to be re-emitted.
 */
typedef struct {
drm_radeon_context_regs_t context;
drm_radeon_texture_regs_t tex[RADEON_MAX_TEXTURE_UNITS];
drm_radeon_context2_regs_t context2;
unsigned int dirty;
} drm_radeon_state_t;
/* Driver-private portion of the shared SAREA mapped by all clients.
 * Layout is userspace ABI.  Two pointer-bearing members are commented out
 * in this port relative to the upstream header -- note that this changes
 * the structure's size/offsets versus stock radeon userspace.
 */
typedef struct {
/* The channel for communication of state information to the
 * kernel on firing a vertex buffer with either of the
 * obsoleted vertex/index ioctls.
 */
drm_radeon_context_regs_t context_state;
drm_radeon_texture_regs_t tex_state[RADEON_MAX_TEXTURE_UNITS];
unsigned int dirty;
unsigned int vertsize;
unsigned int vc_format;
/* The current cliprects, or a subset thereof.
 */
// struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
 */
unsigned int last_frame;
unsigned int last_dispatch;
unsigned int last_clear;
// struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
// 1];
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
int pfState; /* number of 3d windows (0,1,2ormore) */
int pfCurrentPage; /* which buffer is being displayed? */
int crtc2_base; /* CRTC2 frame offset */
int tiling_enabled; /* set by drm, read by 2d + 3d clients */
} drm_radeon_sarea_t;
/* WARNING: If you change any of these defines, make sure to change the
 * defines in the Xserver file (xf86drmRadeon.h)
 *
 * KW: actually it's illegal to change any of this (backwards compatibility).
 */
/* Radeon specific ioctls
 * The device specific ioctl range is 0x40 to 0x79.
 */
#define DRM_RADEON_CP_INIT 0x00
#define DRM_RADEON_CP_START 0x01
#define DRM_RADEON_CP_STOP 0x02
#define DRM_RADEON_CP_RESET 0x03
#define DRM_RADEON_CP_IDLE 0x04
#define DRM_RADEON_RESET 0x05
#define DRM_RADEON_FULLSCREEN 0x06
#define DRM_RADEON_SWAP 0x07
#define DRM_RADEON_CLEAR 0x08
#define DRM_RADEON_VERTEX 0x09
#define DRM_RADEON_INDICES 0x0A
#define DRM_RADEON_NOT_USED /* retired 0x0B slot -- kept so the numbering below stays fixed */
#define DRM_RADEON_STIPPLE 0x0C
#define DRM_RADEON_INDIRECT 0x0D
#define DRM_RADEON_TEXTURE 0x0E
#define DRM_RADEON_VERTEX2 0x0F
#define DRM_RADEON_CMDBUF 0x10
#define DRM_RADEON_GETPARAM 0x11
#define DRM_RADEON_FLIP 0x12
#define DRM_RADEON_ALLOC 0x13
#define DRM_RADEON_FREE 0x14
#define DRM_RADEON_INIT_HEAP 0x15
#define DRM_RADEON_IRQ_EMIT 0x16
#define DRM_RADEON_IRQ_WAIT 0x17
#define DRM_RADEON_CP_RESUME 0x18
#define DRM_RADEON_SETPARAM 0x19
#define DRM_RADEON_SURF_ALLOC 0x1a
#define DRM_RADEON_SURF_FREE 0x1b
/* KMS ioctl */
#define DRM_RADEON_GEM_INFO 0x1c
#define DRM_RADEON_GEM_CREATE 0x1d
#define DRM_RADEON_GEM_MMAP 0x1e
/* note: 0x1f/0x20 are not defined in this header */
#define DRM_RADEON_GEM_PREAD 0x21
#define DRM_RADEON_GEM_PWRITE 0x22
#define DRM_RADEON_GEM_SET_DOMAIN 0x23
#define DRM_RADEON_GEM_WAIT_IDLE 0x24
#define DRM_RADEON_CS 0x26
#define DRM_RADEON_INFO 0x27
/* Full ioctl request codes: each pairs a DRM_RADEON_* command number
 * (offset by DRM_COMMAND_BASE) with its argument struct via the generic
 * DRM_IO/DRM_IOW/DRM_IOWR encoders.
 */
#define DRM_IOCTL_RADEON_CP_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_INIT, drm_radeon_init_t)
#define DRM_IOCTL_RADEON_CP_START DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_START)
#define DRM_IOCTL_RADEON_CP_STOP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CP_STOP, drm_radeon_cp_stop_t)
#define DRM_IOCTL_RADEON_CP_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESET)
#define DRM_IOCTL_RADEON_CP_IDLE DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_IDLE)
#define DRM_IOCTL_RADEON_RESET DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_RESET)
#define DRM_IOCTL_RADEON_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FULLSCREEN, drm_radeon_fullscreen_t)
#define DRM_IOCTL_RADEON_SWAP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_SWAP)
#define DRM_IOCTL_RADEON_CLEAR DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CLEAR, drm_radeon_clear_t)
#define DRM_IOCTL_RADEON_VERTEX DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX, drm_radeon_vertex_t)
#define DRM_IOCTL_RADEON_INDICES DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INDICES, drm_radeon_indices_t)
#define DRM_IOCTL_RADEON_STIPPLE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_STIPPLE, drm_radeon_stipple_t)
#define DRM_IOCTL_RADEON_INDIRECT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INDIRECT, drm_radeon_indirect_t)
#define DRM_IOCTL_RADEON_TEXTURE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_TEXTURE, drm_radeon_texture_t)
#define DRM_IOCTL_RADEON_VERTEX2 DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_VERTEX2, drm_radeon_vertex2_t)
#define DRM_IOCTL_RADEON_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_CMDBUF, drm_radeon_cmd_buffer_t)
#define DRM_IOCTL_RADEON_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GETPARAM, drm_radeon_getparam_t)
#define DRM_IOCTL_RADEON_FLIP DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_FLIP)
#define DRM_IOCTL_RADEON_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_ALLOC, drm_radeon_mem_alloc_t)
#define DRM_IOCTL_RADEON_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_FREE, drm_radeon_mem_free_t)
#define DRM_IOCTL_RADEON_INIT_HEAP DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_INIT_HEAP, drm_radeon_mem_init_heap_t)
#define DRM_IOCTL_RADEON_IRQ_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_IRQ_EMIT, drm_radeon_irq_emit_t)
#define DRM_IOCTL_RADEON_IRQ_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_IRQ_WAIT, drm_radeon_irq_wait_t)
#define DRM_IOCTL_RADEON_CP_RESUME DRM_IO( DRM_COMMAND_BASE + DRM_RADEON_CP_RESUME)
#define DRM_IOCTL_RADEON_SETPARAM DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SETPARAM, drm_radeon_setparam_t)
#define DRM_IOCTL_RADEON_SURF_ALLOC DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_ALLOC, drm_radeon_surface_alloc_t)
#define DRM_IOCTL_RADEON_SURF_FREE DRM_IOW( DRM_COMMAND_BASE + DRM_RADEON_SURF_FREE, drm_radeon_surface_free_t)
/* KMS */
#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
#define DRM_IOCTL_RADEON_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
/* Argument for DRM_IOCTL_RADEON_CP_INIT.  func selects whether to set up or
 * tear down the command processor (and for which GPU generation); the
 * remaining fields describe the framebuffer layout and the offsets of the
 * shared mappings the CP needs.
 */
typedef struct drm_radeon_init {
enum {
RADEON_INIT_CP = 0x01,
RADEON_CLEANUP_CP = 0x02,
RADEON_INIT_R200_CP = 0x03,
RADEON_INIT_R300_CP = 0x04,
RADEON_INIT_R600_CP = 0x05
} func;
unsigned long sarea_priv_offset; /* offset of the driver-private SAREA */
int is_pci;
int cp_mode;
int gart_size;
int ring_size;
int usec_timeout;
unsigned int fb_bpp; /* color buffer depth */
unsigned int front_offset, front_pitch;
unsigned int back_offset, back_pitch;
unsigned int depth_bpp; /* depth buffer depth */
unsigned int depth_offset, depth_pitch;
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long gart_textures_offset;
} drm_radeon_init_t;
/* Argument for DRM_IOCTL_RADEON_CP_STOP: whether to flush pending work and
 * wait for idle before stopping (both are booleans in int clothing).
 */
typedef struct drm_radeon_cp_stop {
int flush;
int idle;
} drm_radeon_cp_stop_t;
/* Argument for DRM_IOCTL_RADEON_FULLSCREEN: enter or leave fullscreen mode. */
typedef struct drm_radeon_fullscreen {
enum {
RADEON_INIT_FULLSCREEN = 0x01,
RADEON_CLEANUP_FULLSCREEN = 0x02
} func;
} drm_radeon_fullscreen_t;
/* Indices into drm_radeon_clear_rect_t: corner coordinates plus depth */
#define CLEAR_X1 0
#define CLEAR_Y1 1
#define CLEAR_X2 2
#define CLEAR_Y2 3
#define CLEAR_DEPTH 4
/* One clear rectangle; each element may be viewed as float or raw bits. */
typedef union drm_radeon_clear_rect {
float f[5];
unsigned int ui[5];
} drm_radeon_clear_rect_t;
/* Argument for DRM_IOCTL_RADEON_CLEAR.  flags takes RADEON_FRONT/BACK/
 * DEPTH/STENCIL plus the RADEON_CLEAR_*/RADEON_USE_* option bits.  The
 * depth_boxes user pointer is disabled in this port (changes struct size
 * versus the upstream header).
 */
typedef struct drm_radeon_clear {
unsigned int flags;
unsigned int clear_color;
unsigned int clear_depth;
unsigned int color_mask;
unsigned int depth_mask; /* misnamed field: should be stencil */
// drm_radeon_clear_rect_t __user *depth_boxes;
} drm_radeon_clear_t;
/* Argument for the (obsolete, v1.x) DRM_IOCTL_RADEON_VERTEX ioctl. */
typedef struct drm_radeon_vertex {
int prim; /* RADEON_POINTS..RADEON_TRIANGLE_STRIP */
int idx; /* Index of vertex buffer */
int count; /* Number of vertices in buffer */
int discard; /* Client finished with buffer? */
} drm_radeon_vertex_t;
/* Argument for the (obsolete, v1.x) DRM_IOCTL_RADEON_INDICES ioctl. */
typedef struct drm_radeon_indices {
int prim; /* RADEON_POINTS..RADEON_TRIANGLE_STRIP */
int idx; /* index of the buffer holding the indices */
int start;
int end;
int discard; /* Client finished with buffer? */
} drm_radeon_indices_t;
/* v1.2 - obsoletes drm_radeon_vertex and drm_radeon_indices
 * - allows multiple primitives and state changes in a single ioctl
 * - supports driver change to emit native primitives
 */
/* NOTE: the state/prim user pointers are disabled in this port, which
 * changes the struct size versus the upstream header.
 */
typedef struct drm_radeon_vertex2 {
int idx; /* Index of vertex buffer */
int discard; /* Client finished with buffer? */
int nr_states;
// drm_radeon_state_t __user *state;
int nr_prims;
// drm_radeon_prim_t __user *prim;
} drm_radeon_vertex2_t;
/* v1.3 - obsoletes drm_radeon_vertex2
 * - allows arbitarily large cliprect list
 * - allows updating of tcl packet, vector and scalar state
 * - allows memory-efficient description of state updates
 * - allows state to be emitted without a primitive
 * (for clears, ctx switches)
 * - allows more than one dma buffer to be referenced per ioctl
 * - supports tcl driver
 * - may be extended in future versions with new cmd types, packets
 */
typedef struct drm_radeon_cmd_buffer {
int bufsz; /* size of buf in bytes */
char __user *buf; /* the command stream itself */
int nbox; /* number of cliprects in boxes */
struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;
/* One image (sub)upload for DRM_IOCTL_RADEON_TEXTURE; referenced from
 * drm_radeon_texture_t.image.
 */
typedef struct drm_radeon_tex_image {
unsigned int x, y; /* Blit coordinates */
unsigned int width, height;
const void __user *data; /* pixel data to upload */
} drm_radeon_tex_image_t;
/* Argument for DRM_IOCTL_RADEON_TEXTURE: destination description plus a
 * pointer to the image payload.
 */
typedef struct drm_radeon_texture {
unsigned int offset; /* destination offset; see RADEON_OFFSET_ALIGN */
int pitch;
int format;
int width; /* Texture image coordinates */
int height;
drm_radeon_tex_image_t __user *image;
} drm_radeon_texture_t;
/* Argument for DRM_IOCTL_RADEON_STIPPLE: user pointer to the stipple
 * pattern words.
 */
typedef struct drm_radeon_stipple {
unsigned int __user *mask;
} drm_radeon_stipple_t;
/* Argument for DRM_IOCTL_RADEON_INDIRECT: fire byte range [start,end) of
 * DMA buffer idx; discard releases the buffer afterwards.
 */
typedef struct drm_radeon_indirect {
int idx;
int start;
int end;
int discard;
} drm_radeon_indirect_t;
/* enum for card type parameters */
#define RADEON_CARD_PCI 0
#define RADEON_CARD_AGP 1
#define RADEON_CARD_PCIE 2
/* 1.3: An ioctl to get parameters that aren't available to the 3d
 * client any other way.
 */
/* Values for drm_radeon_getparam_t.param */
#define RADEON_PARAM_GART_BUFFER_OFFSET 1 /* card offset of 1st GART buffer */
#define RADEON_PARAM_LAST_FRAME 2
#define RADEON_PARAM_LAST_DISPATCH 3
#define RADEON_PARAM_LAST_CLEAR 4
/* Added with DRM version 1.6. */
#define RADEON_PARAM_IRQ_NR 5
#define RADEON_PARAM_GART_BASE 6 /* card offset of GART base */
/* Added with DRM version 1.8. */
#define RADEON_PARAM_REGISTER_HANDLE 7 /* for drmMap() */
#define RADEON_PARAM_STATUS_HANDLE 8
#define RADEON_PARAM_SAREA_HANDLE 9
#define RADEON_PARAM_GART_TEX_HANDLE 10
#define RADEON_PARAM_SCRATCH_OFFSET 11
#define RADEON_PARAM_CARD_TYPE 12 /* returns one of RADEON_CARD_* */
#define RADEON_PARAM_VBLANK_CRTC 13 /* VBLANK CRTC */
#define RADEON_PARAM_FB_LOCATION 14 /* FB location */
#define RADEON_PARAM_NUM_GB_PIPES 15 /* num GB pipes */
#define RADEON_PARAM_DEVICE_ID 16
/* Argument for DRM_IOCTL_RADEON_GETPARAM: the kernel writes the value of
 * the requested RADEON_PARAM_* through the user pointer.
 */
typedef struct drm_radeon_getparam {
int param; /* one of RADEON_PARAM_* */
void __user *value; /* out: where the result is stored */
} drm_radeon_getparam_t;
/* 1.6: Set up a memory manager for regions of shared memory:
 */
#define RADEON_MEM_REGION_GART 1
#define RADEON_MEM_REGION_FB 2
/* Argument for DRM_IOCTL_RADEON_ALLOC: allocate from one of the two heaps
 * above; the resulting offset is written through region_offset.
 */
typedef struct drm_radeon_mem_alloc {
int region; /* RADEON_MEM_REGION_GART or _FB */
int alignment;
int size;
int __user *region_offset; /* offset from start of fb or GART */
} drm_radeon_mem_alloc_t;
/* Argument for DRM_IOCTL_RADEON_FREE: release an allocation previously
 * returned by the alloc ioctl.
 */
typedef struct drm_radeon_mem_free {
int region; /* RADEON_MEM_REGION_GART or _FB */
int region_offset;
} drm_radeon_mem_free_t;
/* Argument for DRM_IOCTL_RADEON_INIT_HEAP: declare the managed range of a
 * memory region to the kernel allocator.
 */
typedef struct drm_radeon_mem_init_heap {
int region; /* RADEON_MEM_REGION_GART or _FB */
int size;
int start;
} drm_radeon_mem_init_heap_t;
/* 1.6: Userspace can request & wait on irq's:
 */
/* Argument for DRM_IOCTL_RADEON_IRQ_EMIT: the emitted irq sequence number
 * is written through the user pointer.
 */
typedef struct drm_radeon_irq_emit {
int __user *irq_seq;
} drm_radeon_irq_emit_t;
/* Argument for DRM_IOCTL_RADEON_IRQ_WAIT: block until the given irq
 * sequence number (from irq_emit) has passed.
 */
typedef struct drm_radeon_irq_wait {
int irq_seq;
} drm_radeon_irq_wait_t;
/* 1.10: Clients tell the DRM where they think the framebuffer is located in
 * the card's address space, via a new generic ioctl to set parameters
 */
typedef struct drm_radeon_setparam {
unsigned int param; /* one of RADEON_SETPARAM_* below */
__s64 value; /* 64-bit payload; meaning depends on param */
} drm_radeon_setparam_t;
#define RADEON_SETPARAM_FB_LOCATION 1 /* determined framebuffer location */
#define RADEON_SETPARAM_SWITCH_TILING 2 /* enable/disable color tiling */
#define RADEON_SETPARAM_PCIGART_LOCATION 3 /* PCI Gart Location */
#define RADEON_SETPARAM_NEW_MEMMAP 4 /* Use new memory map */
#define RADEON_SETPARAM_PCIGART_TABLE_SIZE 5 /* PCI GART Table Size */
#define RADEON_SETPARAM_VBLANK_CRTC 6 /* VBLANK CRTC */
/* 1.14: Clients can allocate/free a surface
 */
/* Argument for DRM_IOCTL_RADEON_SURF_ALLOC. */
typedef struct drm_radeon_surface_alloc {
unsigned int address;
unsigned int size;
unsigned int flags;
} drm_radeon_surface_alloc_t;
/* Argument for DRM_IOCTL_RADEON_SURF_FREE: identifies the surface by the
 * address it was allocated at.
 */
typedef struct drm_radeon_surface_free {
unsigned int address;
} drm_radeon_surface_free_t;
/* CRTC selection bits for the VBLANK_CRTC get/set parameters */
#define DRM_RADEON_VBLANK_CRTC1 1
#define DRM_RADEON_VBLANK_CRTC2 2
/*
 * Kernel modesetting world below.
 */
/* GEM memory domains (bitmask values used in read_domains/write_domain) */
#define RADEON_GEM_DOMAIN_CPU 0x1
#define RADEON_GEM_DOMAIN_GTT 0x2
#define RADEON_GEM_DOMAIN_VRAM 0x4
/* Out-argument of DRM_IOCTL_RADEON_GEM_INFO: memory pool sizes
 * (presumably in bytes -- TODO confirm against the kernel side).
 */
struct drm_radeon_gem_info {
uint64_t gart_size;
uint64_t vram_size;
uint64_t vram_visible;
};
/* Flag for drm_radeon_gem_create.flags */
#define RADEON_GEM_NO_BACKING_STORE 1
/* Argument for DRM_IOCTL_RADEON_GEM_CREATE: the kernel fills in handle for
 * the newly created buffer object.
 */
struct drm_radeon_gem_create {
uint64_t size;
uint64_t alignment;
uint32_t handle; /* out: GEM handle of the new object */
uint32_t initial_domain; /* RADEON_GEM_DOMAIN_* */
uint32_t flags; /* e.g. RADEON_GEM_NO_BACKING_STORE */
};
/* Argument for DRM_IOCTL_RADEON_GEM_MMAP: map size bytes of the object at
 * offset; the resulting address comes back in addr_ptr (64-bit so the
 * struct layout is identical on 32/64-bit userspace).
 */
struct drm_radeon_gem_mmap {
uint32_t handle; /* GEM handle of the object to map */
uint32_t pad; /* keeps the following uint64_t 8-byte aligned */
uint64_t offset;
uint64_t size;
uint64_t addr_ptr; /* out: mapped address, cast from a pointer */
};
/* Argument for DRM_IOCTL_RADEON_GEM_SET_DOMAIN: move/validate an object
 * into the given RADEON_GEM_DOMAIN_* read and write domains.
 */
struct drm_radeon_gem_set_domain {
uint32_t handle;
uint32_t read_domains;
uint32_t write_domain;
};
/* Argument for DRM_IOCTL_RADEON_GEM_WAIT_IDLE: wait for all GPU use of the
 * object to finish.
 */
struct drm_radeon_gem_wait_idle {
uint32_t handle;
uint32_t pad; /* explicit padding for 64-bit alignment consistency */
};
/* Busy-query argument (note: no DRM_RADEON_GEM_BUSY ioctl number is
 * defined in this header -- the struct is declared for ABI completeness).
 */
struct drm_radeon_gem_busy {
uint32_t handle;
uint32_t busy; /* out: non-idle status of the object */
};
/* Argument for DRM_IOCTL_RADEON_GEM_PREAD: copy object contents out to a
 * user buffer.
 */
struct drm_radeon_gem_pread {
/** Handle for the object being read. */
uint32_t handle;
uint32_t pad; /* keeps the following uint64_t 8-byte aligned */
/** Offset into the object to read from */
uint64_t offset;
/** Length of data to read */
uint64_t size;
/** Pointer to write the data into. */
/* void *, but pointers are not 32/64 compatible */
uint64_t data_ptr;
};
/* Argument for DRM_IOCTL_RADEON_GEM_PWRITE: copy a user buffer into the
 * object.  Mirrors drm_radeon_gem_pread.
 */
struct drm_radeon_gem_pwrite {
/** Handle for the object being written to. */
uint32_t handle;
uint32_t pad; /* keeps the following uint64_t 8-byte aligned */
/** Offset into the object to write to */
uint64_t offset;
/** Length of data to write */
uint64_t size;
/** Pointer to read the data from. */
/* void *, but pointers are not 32/64 compatible */
uint64_t data_ptr;
};
/* Chunk types for the command-submission (CS) ioctl */
#define RADEON_CHUNK_ID_RELOCS 0x01
#define RADEON_CHUNK_ID_IB 0x02
/* One chunk of a CS submission: chunk_data is a user pointer cast to
 * uint64_t (32/64-bit safe) to length_dw 32-bit words of payload.
 */
struct drm_radeon_cs_chunk {
uint32_t chunk_id; /* RADEON_CHUNK_ID_* */
uint32_t length_dw; /* payload length in dwords */
uint64_t chunk_data;
};
/* One relocation entry inside a RADEON_CHUNK_ID_RELOCS chunk: the buffer
 * object (by handle) and the domains it must be placed in.
 */
struct drm_radeon_cs_reloc {
uint32_t handle;
uint32_t read_domains; /* RADEON_GEM_DOMAIN_* bitmask */
uint32_t write_domain; /* RADEON_GEM_DOMAIN_* bitmask */
uint32_t flags;
};
/* Argument for DRM_IOCTL_RADEON_CS: submit num_chunks chunks of command
 * stream and relocations in a single ioctl.
 */
struct drm_radeon_cs {
uint32_t num_chunks;
uint32_t cs_id;
/* this points to uint64_t * which point to cs chunks */
uint64_t chunks;
/* updates to the limits after this CS ioctl */
uint64_t gart_limit;
uint64_t vram_limit;
};
/* Request codes for DRM_IOCTL_RADEON_INFO */
#define RADEON_INFO_DEVICE_ID 0x00
#define RADEON_INFO_NUM_GB_PIPES 0x01
/* Argument for DRM_IOCTL_RADEON_INFO: the KMS replacement for getparam. */
struct drm_radeon_info {
uint32_t request; /* one of RADEON_INFO_* */
uint32_t pad; /* keeps value 8-byte aligned */
uint64_t value; /* out: result of the query */
};
#endif |
/drivers/video/drm/radeon/radeon_reg.h |
---|
982,15 → 982,12 |
# define RS400_TMDS2_PLLRST (1 << 1) |
#define RADEON_GEN_INT_CNTL 0x0040 |
# define RADEON_CRTC_VBLANK_MASK (1 << 0) |
# define RADEON_CRTC2_VBLANK_MASK (1 << 9) |
# define RADEON_SW_INT_ENABLE (1 << 25) |
#define RADEON_GEN_INT_STATUS 0x0044 |
# define AVIVO_DISPLAY_INT_STATUS (1 << 0) |
# define RADEON_CRTC_VBLANK_STAT (1 << 0) |
# define RADEON_CRTC_VBLANK_STAT_ACK (1 << 0) |
# define RADEON_CRTC2_VBLANK_STAT (1 << 9) |
# define RADEON_CRTC2_VBLANK_STAT_ACK (1 << 9) |
# define RADEON_VSYNC_INT_AK (1 << 2) |
# define RADEON_VSYNC_INT (1 << 2) |
# define RADEON_VSYNC2_INT_AK (1 << 6) |
# define RADEON_VSYNC2_INT (1 << 6) |
# define RADEON_SW_INT_FIRE (1 << 26) |
# define RADEON_SW_INT_TEST (1 << 25) |
# define RADEON_SW_INT_TEST_ACK (1 << 25) |
1945,11 → 1942,6 |
# define RADEON_TXFORMAT_DXT1 (12 << 0) |
# define RADEON_TXFORMAT_DXT23 (14 << 0) |
# define RADEON_TXFORMAT_DXT45 (15 << 0) |
# define RADEON_TXFORMAT_SHADOW16 (16 << 0) |
# define RADEON_TXFORMAT_SHADOW32 (17 << 0) |
# define RADEON_TXFORMAT_DUDV88 (18 << 0) |
# define RADEON_TXFORMAT_LDUDV655 (19 << 0) |
# define RADEON_TXFORMAT_LDUDUV8888 (20 << 0) |
# define RADEON_TXFORMAT_FORMAT_MASK (31 << 0) |
# define RADEON_TXFORMAT_FORMAT_SHIFT 0 |
# define RADEON_TXFORMAT_APPLE_YUV_MODE (1 << 5) |
2208,7 → 2200,7 |
# define RADEON_ROP_ENABLE (1 << 6) |
# define RADEON_STENCIL_ENABLE (1 << 7) |
# define RADEON_Z_ENABLE (1 << 8) |
# define RADEON_DEPTHXY_OFFSET_ENABLE (1 << 9) |
# define RADEON_DEPTH_XZ_OFFEST_ENABLE (1 << 9) |
# define RADEON_RB3D_COLOR_FORMAT_SHIFT 10 |
# define RADEON_COLOR_FORMAT_ARGB1555 3 |
2342,9 → 2334,6 |
# define RADEON_RE_WIDTH_SHIFT 0 |
# define RADEON_RE_HEIGHT_SHIFT 16 |
#define RADEON_RB3D_ZPASS_DATA 0x3290 |
#define RADEON_RB3D_ZPASS_ADDR 0x3294 |
#define RADEON_SE_CNTL 0x1c4c |
# define RADEON_FFACE_CULL_CW (0 << 0) |
# define RADEON_FFACE_CULL_CCW (1 << 0) |
2778,12 → 2767,7 |
# define R200_TXFORMAT_DXT1 (12 << 0) |
# define R200_TXFORMAT_DXT23 (14 << 0) |
# define R200_TXFORMAT_DXT45 (15 << 0) |
# define R200_TXFORMAT_DVDU88 (18 << 0) |
# define R200_TXFORMAT_LDVDU655 (19 << 0) |
# define R200_TXFORMAT_LDVDU8888 (20 << 0) |
# define R200_TXFORMAT_GR1616 (21 << 0) |
# define R200_TXFORMAT_ABGR8888 (22 << 0) |
# define R200_TXFORMAT_BGR111110 (23 << 0) |
# define R200_TXFORMAT_FORMAT_MASK (31 << 0) |
# define R200_TXFORMAT_FORMAT_SHIFT 0 |
# define R200_TXFORMAT_ALPHA_IN_MAP (1 << 6) |
2828,13 → 2812,6 |
#define R200_PP_TXPITCH_4 0x2c90 /* NPOT only */ |
#define R200_PP_TXPITCH_5 0x2cb0 /* NPOT only */ |
#define R200_PP_CUBIC_FACES_0 0x2c18 |
#define R200_PP_CUBIC_FACES_1 0x2c38 |
#define R200_PP_CUBIC_FACES_2 0x2c58 |
#define R200_PP_CUBIC_FACES_3 0x2c78 |
#define R200_PP_CUBIC_FACES_4 0x2c98 |
#define R200_PP_CUBIC_FACES_5 0x2cb8 |
#define R200_PP_TXOFFSET_0 0x2d00 |
# define R200_TXO_ENDIAN_NO_SWAP (0 << 0) |
# define R200_TXO_ENDIAN_BYTE_SWAP (1 << 0) |
2846,44 → 2823,11 |
# define R200_TXO_MICRO_TILE (1 << 3) |
# define R200_TXO_OFFSET_MASK 0xffffffe0 |
# define R200_TXO_OFFSET_SHIFT 5 |
#define R200_PP_CUBIC_OFFSET_F1_0 0x2d04 |
#define R200_PP_CUBIC_OFFSET_F2_0 0x2d08 |
#define R200_PP_CUBIC_OFFSET_F3_0 0x2d0c |
#define R200_PP_CUBIC_OFFSET_F4_0 0x2d10 |
#define R200_PP_CUBIC_OFFSET_F5_0 0x2d14 |
#define R200_PP_TXOFFSET_1 0x2d18 |
#define R200_PP_CUBIC_OFFSET_F1_1 0x2d1c |
#define R200_PP_CUBIC_OFFSET_F2_1 0x2d20 |
#define R200_PP_CUBIC_OFFSET_F3_1 0x2d24 |
#define R200_PP_CUBIC_OFFSET_F4_1 0x2d28 |
#define R200_PP_CUBIC_OFFSET_F5_1 0x2d2c |
#define R200_PP_TXOFFSET_2 0x2d30 |
#define R200_PP_CUBIC_OFFSET_F1_2 0x2d34 |
#define R200_PP_CUBIC_OFFSET_F2_2 0x2d38 |
#define R200_PP_CUBIC_OFFSET_F3_2 0x2d3c |
#define R200_PP_CUBIC_OFFSET_F4_2 0x2d40 |
#define R200_PP_CUBIC_OFFSET_F5_2 0x2d44 |
#define R200_PP_TXOFFSET_3 0x2d48 |
#define R200_PP_CUBIC_OFFSET_F1_3 0x2d4c |
#define R200_PP_CUBIC_OFFSET_F2_3 0x2d50 |
#define R200_PP_CUBIC_OFFSET_F3_3 0x2d54 |
#define R200_PP_CUBIC_OFFSET_F4_3 0x2d58 |
#define R200_PP_CUBIC_OFFSET_F5_3 0x2d5c |
#define R200_PP_TXOFFSET_4 0x2d60 |
#define R200_PP_CUBIC_OFFSET_F1_4 0x2d64 |
#define R200_PP_CUBIC_OFFSET_F2_4 0x2d68 |
#define R200_PP_CUBIC_OFFSET_F3_4 0x2d6c |
#define R200_PP_CUBIC_OFFSET_F4_4 0x2d70 |
#define R200_PP_CUBIC_OFFSET_F5_4 0x2d74 |
#define R200_PP_TXOFFSET_5 0x2d78 |
#define R200_PP_CUBIC_OFFSET_F1_5 0x2d7c |
#define R200_PP_CUBIC_OFFSET_F2_5 0x2d80 |
#define R200_PP_CUBIC_OFFSET_F3_5 0x2d84 |
#define R200_PP_CUBIC_OFFSET_F4_5 0x2d88 |
#define R200_PP_CUBIC_OFFSET_F5_5 0x2d8c |
#define R200_PP_TFACTOR_0 0x2ee0 |
#define R200_PP_TFACTOR_1 0x2ee4 |
3225,11 → 3169,6 |
# define R200_FORCE_INORDER_PROC (1<<31) |
#define R200_PP_CNTL_X 0x2cc4 |
#define R200_PP_TXMULTI_CTL_0 0x2c1c |
#define R200_PP_TXMULTI_CTL_1 0x2c3c |
#define R200_PP_TXMULTI_CTL_2 0x2c5c |
#define R200_PP_TXMULTI_CTL_3 0x2c7c |
#define R200_PP_TXMULTI_CTL_4 0x2c9c |
#define R200_PP_TXMULTI_CTL_5 0x2cbc |
#define R200_SE_VTX_STATE_CNTL 0x2180 |
# define R200_UPDATE_USER_COLOR_0_ENA_MASK (1<<16) |
3255,24 → 3194,6 |
#define RADEON_CP_RB_WPTR 0x0714 |
#define RADEON_CP_RB_RPTR_WR 0x071c |
#define RADEON_SCRATCH_UMSK 0x0770 |
#define RADEON_SCRATCH_ADDR 0x0774 |
#define R600_CP_RB_BASE 0xc100 |
#define R600_CP_RB_CNTL 0xc104 |
# define R600_RB_BUFSZ(x) ((x) << 0) |
# define R600_RB_BLKSZ(x) ((x) << 8) |
# define R600_RB_NO_UPDATE (1 << 27) |
# define R600_RB_RPTR_WR_ENA (1 << 31) |
#define R600_CP_RB_RPTR_WR 0xc108 |
#define R600_CP_RB_RPTR_ADDR 0xc10c |
#define R600_CP_RB_RPTR_ADDR_HI 0xc110 |
#define R600_CP_RB_WPTR 0xc114 |
#define R600_CP_RB_WPTR_ADDR 0xc118 |
#define R600_CP_RB_WPTR_ADDR_HI 0xc11c |
#define R600_CP_RB_RPTR 0x8700 |
#define R600_CP_RB_WPTR_DELAY 0x8704 |
#define RADEON_CP_IB_BASE 0x0738 |
#define RADEON_CP_IB_BUFSZ 0x073c |
3480,9 → 3401,7 |
# define RADEON_RGB_CONVERT_BY_PASS (1 << 10) |
# define RADEON_UVRAM_READ_MARGIN_SHIFT 16 |
# define RADEON_FIFORAM_FFMACRO_READ_MARGIN_SHIFT 20 |
# define RADEON_RGB_ATTEN_SEL(x) ((x) << 24) |
# define RADEON_TVOUT_SCALE_EN (1 << 26) |
# define RADEON_RGB_ATTEN_VAL(x) ((x) << 28) |
#define RADEON_TV_SYNC_CNTL 0x0808 |
# define RADEON_SYNC_OE (1 << 0) |
# define RADEON_SYNC_OUT (1 << 1) |
3649,6 → 3568,4 |
#define RADEON_SCRATCH_REG4 0x15f0 |
#define RADEON_SCRATCH_REG5 0x15f4 |
#define RV530_GB_PIPE_SELECT2 0x4124 |
#endif |
/drivers/video/drm/drm_crtc.c |
---|
29,7 → 29,7 |
* Dave Airlie <airlied@linux.ie> |
* Jesse Barnes <jesse.barnes@intel.com> |
*/ |
#include <linux/list.h> |
#include <list.h> |
#include "drm.h" |
#include "drmP.h" |
#include "drm_crtc.h" |
68,10 → 68,10 |
*/ |
static struct drm_prop_enum_list drm_scaling_mode_enum_list[] = |
{ |
{ DRM_MODE_SCALE_NONE, "None" }, |
{ DRM_MODE_SCALE_FULLSCREEN, "Full" }, |
{ DRM_MODE_SCALE_CENTER, "Center" }, |
{ DRM_MODE_SCALE_ASPECT, "Full aspect" }, |
{ DRM_MODE_SCALE_NON_GPU, "Non-GPU" }, |
{ DRM_MODE_SCALE_FULLSCREEN, "Fullscreen" }, |
{ DRM_MODE_SCALE_NO_SCALE, "No scale" }, |
{ DRM_MODE_SCALE_ASPECT, "Aspect" }, |
}; |
static struct drm_prop_enum_list drm_dithering_mode_enum_list[] = |
108,7 → 108,6 |
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ |
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ |
{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ |
{ DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ |
}; |
DRM_ENUM_NAME_FN(drm_get_tv_select_name, drm_tv_select_enum_list) |
119,7 → 118,6 |
{ DRM_MODE_SUBCONNECTOR_Composite, "Composite" }, /* TV-out */ |
{ DRM_MODE_SUBCONNECTOR_SVIDEO, "SVIDEO" }, /* TV-out */ |
{ DRM_MODE_SUBCONNECTOR_Component, "Component" }, /* TV-out */ |
{ DRM_MODE_SUBCONNECTOR_SCART, "SCART" }, /* TV-out */ |
}; |
DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name, |
148,7 → 146,6 |
{ DRM_MODE_CONNECTOR_DisplayPort, "DisplayPort", 0 }, |
{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 }, |
{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 }, |
{ DRM_MODE_CONNECTOR_TV, "TV", 0 }, |
}; |
static struct drm_prop_enum_list drm_encoder_enum_list[] = |
168,7 → 165,6 |
encoder->base.id); |
return buf; |
} |
EXPORT_SYMBOL(drm_get_encoder_name); |
char *drm_get_connector_name(struct drm_connector *connector) |
{ |
218,14 → 214,15 |
return -EINVAL; |
} |
mutex_lock(&dev->mode_config.idr_mutex); |
// mutex_lock(&dev->mode_config.idr_mutex); |
ret = idr_get_new_above(&dev->mode_config.crtc_idr, obj, 1, &new_id); |
mutex_unlock(&dev->mode_config.idr_mutex); |
// mutex_unlock(&dev->mode_config.idr_mutex); |
if (ret == -EAGAIN) |
goto again; |
obj->id = new_id; |
obj->type = obj_type; |
return 0; |
} |
242,9 → 239,9 |
static void drm_mode_object_put(struct drm_device *dev, |
struct drm_mode_object *object) |
{ |
mutex_lock(&dev->mode_config.idr_mutex); |
// mutex_lock(&dev->mode_config.idr_mutex); |
idr_remove(&dev->mode_config.crtc_idr, object->id); |
mutex_unlock(&dev->mode_config.idr_mutex); |
// mutex_unlock(&dev->mode_config.idr_mutex); |
} |
void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type) |
251,17 → 248,43 |
{ |
struct drm_mode_object *obj = NULL; |
mutex_lock(&dev->mode_config.idr_mutex); |
// mutex_lock(&dev->mode_config.idr_mutex); |
obj = idr_find(&dev->mode_config.crtc_idr, id); |
if (!obj || (obj->type != type) || (obj->id != id)) |
obj = NULL; |
mutex_unlock(&dev->mode_config.idr_mutex); |
// mutex_unlock(&dev->mode_config.idr_mutex); |
return obj; |
} |
EXPORT_SYMBOL(drm_mode_object_find); |
/** |
* drm_crtc_from_fb - find the CRTC structure associated with an fb |
* @dev: DRM device |
* @fb: framebuffer in question |
* |
* LOCKING: |
* Caller must hold mode_config lock. |
* |
* Find CRTC in the mode_config structure that matches @fb. |
* |
* RETURNS: |
* Pointer to the CRTC or NULL if it wasn't found. |
*/ |
struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev, |
struct drm_framebuffer *fb) |
{ |
struct drm_crtc *crtc; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if (crtc->fb == fb) |
return crtc; |
} |
return NULL; |
} |
/** |
* drm_framebuffer_init - initialize a framebuffer |
* @dev: DRM device |
* |
307,21 → 330,12 |
{ |
struct drm_device *dev = fb->dev; |
struct drm_crtc *crtc; |
struct drm_mode_set set; |
int ret; |
/* remove from any CRTC */ |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if (crtc->fb == fb) { |
/* should turn off the crtc */ |
memset(&set, 0, sizeof(struct drm_mode_set)); |
set.crtc = crtc; |
set.fb = NULL; |
ret = crtc->funcs->set_config(&set); |
if (ret) |
DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); |
if (crtc->fb == fb) |
crtc->fb = NULL; |
} |
} |
drm_mode_object_put(dev, &fb->base); |
list_del(&fb->head); |
329,6 → 343,7 |
} |
EXPORT_SYMBOL(drm_framebuffer_cleanup); |
/** |
* drm_crtc_init - Initialise a new CRTC object |
* @dev: DRM device |
343,15 → 358,20 |
void drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, |
const struct drm_crtc_funcs *funcs) |
{ |
ENTRY(); |
crtc->dev = dev; |
crtc->funcs = funcs; |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); |
list_add_tail(&crtc->head, &dev->mode_config.crtc_list); |
dev->mode_config.num_crtc++; |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
LEAVE(); |
} |
EXPORT_SYMBOL(drm_crtc_init); |
433,7 → 453,7 |
const struct drm_connector_funcs *funcs, |
int connector_type) |
{ |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
connector->dev = dev; |
connector->funcs = funcs; |
455,7 → 475,7 |
drm_connector_attach_property(connector, |
dev->mode_config.dpms_property, 0); |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_connector_init); |
482,10 → 502,10 |
list_for_each_entry_safe(mode, t, &connector->user_modes, head) |
drm_mode_remove(connector, mode); |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
drm_mode_object_put(dev, &connector->base); |
list_del(&connector->head); |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_connector_cleanup); |
494,7 → 514,7 |
const struct drm_encoder_funcs *funcs, |
int encoder_type) |
{ |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
encoder->dev = dev; |
505,7 → 525,7 |
list_add_tail(&encoder->head, &dev->mode_config.encoder_list); |
dev->mode_config.num_encoder++; |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_encoder_init); |
512,10 → 532,10 |
void drm_encoder_cleanup(struct drm_encoder *encoder) |
{ |
struct drm_device *dev = encoder->dev; |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
drm_mode_object_put(dev, &encoder->base); |
list_del(&encoder->head); |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
} |
EXPORT_SYMBOL(drm_encoder_cleanup); |
568,6 → 588,7 |
struct drm_property *dpms; |
int i; |
ENTRY(); |
/* |
* Standard properties (apply to all connectors) |
*/ |
583,6 → 604,7 |
drm_dpms_enum_list[i].name); |
dev->mode_config.dpms_property = dpms; |
LEAVE(); |
return 0; |
} |
703,42 → 725,6 |
drm_property_add_enum(dev->mode_config.tv_mode_property, i, |
i, modes[i]); |
dev->mode_config.tv_brightness_property = |
drm_property_create(dev, DRM_MODE_PROP_RANGE, |
"brightness", 2); |
dev->mode_config.tv_brightness_property->values[0] = 0; |
dev->mode_config.tv_brightness_property->values[1] = 100; |
dev->mode_config.tv_contrast_property = |
drm_property_create(dev, DRM_MODE_PROP_RANGE, |
"contrast", 2); |
dev->mode_config.tv_contrast_property->values[0] = 0; |
dev->mode_config.tv_contrast_property->values[1] = 100; |
dev->mode_config.tv_flicker_reduction_property = |
drm_property_create(dev, DRM_MODE_PROP_RANGE, |
"flicker reduction", 2); |
dev->mode_config.tv_flicker_reduction_property->values[0] = 0; |
dev->mode_config.tv_flicker_reduction_property->values[1] = 100; |
dev->mode_config.tv_overscan_property = |
drm_property_create(dev, DRM_MODE_PROP_RANGE, |
"overscan", 2); |
dev->mode_config.tv_overscan_property->values[0] = 0; |
dev->mode_config.tv_overscan_property->values[1] = 100; |
dev->mode_config.tv_saturation_property = |
drm_property_create(dev, DRM_MODE_PROP_RANGE, |
"saturation", 2); |
dev->mode_config.tv_saturation_property->values[0] = 0; |
dev->mode_config.tv_saturation_property->values[1] = 100; |
dev->mode_config.tv_hue_property = |
drm_property_create(dev, DRM_MODE_PROP_RANGE, |
"hue", 2); |
dev->mode_config.tv_hue_property->values[0] = 0; |
dev->mode_config.tv_hue_property->values[1] = 100; |
return 0; |
} |
EXPORT_SYMBOL(drm_mode_create_tv_properties); |
812,6 → 798,8 |
*/ |
void drm_mode_config_init(struct drm_device *dev) |
{ |
ENTRY(); |
// mutex_init(&dev->mode_config.mutex); |
// mutex_init(&dev->mode_config.idr_mutex); |
INIT_LIST_HEAD(&dev->mode_config.fb_list); |
821,11 → 809,12 |
INIT_LIST_HEAD(&dev->mode_config.encoder_list); |
INIT_LIST_HEAD(&dev->mode_config.property_list); |
INIT_LIST_HEAD(&dev->mode_config.property_blob_list); |
idr_init(&dev->mode_config.crtc_idr); |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
drm_mode_create_standard_connector_properties(dev); |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
/* Just to be sure */ |
dev->mode_config.num_fb = 0; |
832,6 → 821,9 |
dev->mode_config.num_connector = 0; |
dev->mode_config.num_crtc = 0; |
dev->mode_config.num_encoder = 0; |
LEAVE(); |
} |
EXPORT_SYMBOL(drm_mode_config_init); |
1086,7 → 1078,7 |
if (file_priv->master->minor->type == DRM_MINOR_CONTROL) { |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, |
head) { |
DRM_DEBUG_KMS("CRTC ID is %d\n", crtc->base.id); |
DRM_DEBUG("CRTC ID is %d\n", crtc->base.id); |
if (put_user(crtc->base.id, crtc_id + copied)) { |
ret = -EFAULT; |
goto out; |
1114,7 → 1106,7 |
list_for_each_entry(encoder, |
&dev->mode_config.encoder_list, |
head) { |
DRM_DEBUG_KMS("ENCODER ID is %d\n", |
DRM_DEBUG("ENCODER ID is %d\n", |
encoder->base.id); |
if (put_user(encoder->base.id, encoder_id + |
copied)) { |
1145,7 → 1137,7 |
list_for_each_entry(connector, |
&dev->mode_config.connector_list, |
head) { |
DRM_DEBUG_KMS("CONNECTOR ID is %d\n", |
DRM_DEBUG("CONNECTOR ID is %d\n", |
connector->base.id); |
if (put_user(connector->base.id, |
connector_id + copied)) { |
1169,7 → 1161,7 |
} |
card_res->count_connectors = connector_count; |
DRM_DEBUG_KMS("Counted %d %d %d\n", card_res->count_crtcs, |
DRM_DEBUG("Counted %d %d %d\n", card_res->count_crtcs, |
card_res->count_connectors, card_res->count_encoders); |
out: |
1273,7 → 1265,7 |
memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo)); |
DRM_DEBUG_KMS("connector id %d:\n", out_resp->connector_id); |
DRM_DEBUG("connector id %d:\n", out_resp->connector_id); |
mutex_lock(&dev->mode_config.mutex); |
1449,7 → 1441,7 |
obj = drm_mode_object_find(dev, crtc_req->crtc_id, |
DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", crtc_req->crtc_id); |
DRM_DEBUG("Unknown CRTC ID %d\n", crtc_req->crtc_id); |
ret = -EINVAL; |
goto out; |
} |
1462,8 → 1454,7 |
list_for_each_entry(crtcfb, |
&dev->mode_config.crtc_list, head) { |
if (crtcfb == crtc) { |
DRM_DEBUG_KMS("Using current fb for " |
"setmode\n"); |
DRM_DEBUG("Using current fb for setmode\n"); |
fb = crtc->fb; |
} |
} |
1471,8 → 1462,7 |
obj = drm_mode_object_find(dev, crtc_req->fb_id, |
DRM_MODE_OBJECT_FB); |
if (!obj) { |
DRM_DEBUG_KMS("Unknown FB ID%d\n", |
crtc_req->fb_id); |
DRM_DEBUG("Unknown FB ID%d\n", crtc_req->fb_id); |
ret = -EINVAL; |
goto out; |
} |
1485,13 → 1475,13 |
} |
if (crtc_req->count_connectors == 0 && mode) { |
DRM_DEBUG_KMS("Count connectors is 0 but mode set\n"); |
DRM_DEBUG("Count connectors is 0 but mode set\n"); |
ret = -EINVAL; |
goto out; |
} |
if (crtc_req->count_connectors > 0 && (!mode || !fb)) { |
DRM_DEBUG_KMS("Count connectors is %d but no mode or fb set\n", |
if (crtc_req->count_connectors > 0 && !mode && !fb) { |
DRM_DEBUG("Count connectors is %d but no mode or fb set\n", |
crtc_req->count_connectors); |
ret = -EINVAL; |
goto out; |
1524,8 → 1514,7 |
obj = drm_mode_object_find(dev, out_id, |
DRM_MODE_OBJECT_CONNECTOR); |
if (!obj) { |
DRM_DEBUG_KMS("Connector id %d unknown\n", |
out_id); |
DRM_DEBUG("Connector id %d unknown\n", out_id); |
ret = -EINVAL; |
goto out; |
} |
1558,7 → 1547,7 |
struct drm_crtc *crtc; |
int ret = 0; |
DRM_DEBUG_KMS("\n"); |
DRM_DEBUG("\n"); |
if (!req->flags) { |
DRM_ERROR("no operation set\n"); |
1568,7 → 1557,7 |
mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
DRM_DEBUG_KMS("Unknown CRTC ID %d\n", req->crtc_id); |
DRM_DEBUG("Unknown CRTC ID %d\n", req->crtc_id); |
ret = -EINVAL; |
goto out; |
} |
1785,6 → 1774,7 |
} |
#endif |
/** |
* drm_mode_attachmode - add a mode to the user mode list |
* @dev: DRM device |
1966,6 → 1956,7 |
} |
drm_mode_object_get(dev, &property->base, DRM_MODE_OBJECT_PROPERTY); |
property->flags = flags; |
property->num_values = num_values; |
INIT_LIST_HEAD(&property->enum_blob_list); |
1973,6 → 1964,7 |
if (name) |
strncpy(property->name, name, DRM_PROP_NAME_LEN); |
list_add_tail(&property->head, &dev->mode_config.property_list); |
dbgprintf("%s %x name %s\n", __FUNCTION__, property, name); |
2107,7 → 2099,7 |
uint64_t __user *values_ptr; |
uint32_t __user *blob_length_ptr; |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY); |
if (!obj) { |
ret = -EINVAL; |
2185,7 → 2177,7 |
out_resp->count_enum_blobs = blob_count; |
} |
done: |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
2291,7 → 2283,7 |
int ret = -EINVAL; |
int i; |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, out_resp->connector_id, DRM_MODE_OBJECT_CONNECTOR); |
if (!obj) { |
2348,7 → 2340,7 |
if (!ret) |
drm_connector_property_set_value(connector, property, out_resp->value); |
out: |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
2409,7 → 2401,7 |
int size; |
int ret = 0; |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
ret = -EINVAL; |
2445,7 → 2437,7 |
crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size); |
out: |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
2460,7 → 2452,7 |
int size; |
int ret = 0; |
mutex_lock(&dev->mode_config.mutex); |
// mutex_lock(&dev->mode_config.mutex); |
obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC); |
if (!obj) { |
ret = -EINVAL; |
2493,10 → 2485,8 |
goto out; |
} |
out: |
mutex_unlock(&dev->mode_config.mutex); |
// mutex_unlock(&dev->mode_config.mutex); |
return ret; |
} |
#endif |
/drivers/video/drm/drm_crtc_helper.c |
---|
33,6 → 33,15 |
#include "drm_crtc.h" |
#include "drm_crtc_helper.h" |
/* |
* Detailed mode info for 800x600@60Hz |
*/ |
static struct drm_display_mode std_modes[] = { |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DEFAULT, 40000, 800, 840, |
968, 1056, 0, 600, 601, 605, 628, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
}; |
static void drm_mode_validate_flag(struct drm_connector *connector, |
int flags) |
{ |
85,7 → 94,7 |
int count = 0; |
int mode_flags = 0; |
DRM_DEBUG_KMS("%s\n", drm_get_connector_name(connector)); |
DRM_DEBUG("%s\n", drm_get_connector_name(connector)); |
/* set all modes to the unverified state */ |
list_for_each_entry_safe(mode, t, &connector->modes, head) |
mode->status = MODE_UNVERIFIED; |
93,17 → 102,15 |
connector->status = connector->funcs->detect(connector); |
if (connector->status == connector_status_disconnected) { |
DRM_DEBUG_KMS("%s is disconnected\n", |
DRM_DEBUG("%s is disconnected\n", |
drm_get_connector_name(connector)); |
goto prune; |
/* TODO set EDID to NULL */ |
return 0; |
} |
count = (*connector_funcs->get_modes)(connector); |
if (!count) { |
count = drm_add_modes_noedid(connector, 800, 600); |
if (!count) |
return 0; |
} |
drm_mode_connector_list_update(connector); |
123,7 → 130,7 |
mode); |
} |
prune: |
drm_mode_prune_invalid(dev, &connector->modes, true); |
if (list_empty(&connector->modes)) |
131,8 → 138,7 |
drm_mode_sort(&connector->modes); |
DRM_DEBUG_KMS("Probed modes for %s\n", |
drm_get_connector_name(connector)); |
DRM_DEBUG("Probed modes for %s\n", drm_get_connector_name(connector)); |
list_for_each_entry_safe(mode, t, &connector->modes, head) { |
mode->vrefresh = drm_mode_vrefresh(mode); |
159,6 → 165,39 |
} |
EXPORT_SYMBOL(drm_helper_probe_connector_modes); |
static void drm_helper_add_std_modes(struct drm_device *dev, |
struct drm_connector *connector) |
{ |
struct drm_display_mode *mode, *t; |
int i; |
for (i = 0; i < ARRAY_SIZE(std_modes); i++) { |
struct drm_display_mode *stdmode; |
/* |
* When no valid EDID modes are available we end up |
* here and bailed in the past, now we add some standard |
* modes and move on. |
*/ |
stdmode = drm_mode_duplicate(dev, &std_modes[i]); |
drm_mode_probed_add(connector, stdmode); |
drm_mode_list_concat(&connector->probed_modes, |
&connector->modes); |
DRM_DEBUG("Adding mode %s to %s\n", stdmode->name, |
drm_get_connector_name(connector)); |
} |
drm_mode_sort(&connector->modes); |
DRM_DEBUG("Added std modes on %s\n", drm_get_connector_name(connector)); |
list_for_each_entry_safe(mode, t, &connector->modes, head) { |
mode->vrefresh = drm_mode_vrefresh(mode); |
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V); |
drm_mode_debug_printmodeline(mode); |
} |
} |
/** |
* drm_helper_encoder_in_use - check if a given encoder is in use |
* @encoder: encoder to check |
219,28 → 258,14 |
void drm_helper_disable_unused_functions(struct drm_device *dev) |
{ |
struct drm_encoder *encoder; |
struct drm_connector *connector; |
struct drm_encoder_helper_funcs *encoder_funcs; |
struct drm_crtc *crtc; |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
if (!connector->encoder) |
continue; |
if (connector->status == connector_status_disconnected) |
connector->encoder = NULL; |
} |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
encoder_funcs = encoder->helper_private; |
if (!drm_helper_encoder_in_use(encoder)) { |
if (encoder_funcs->disable) |
(*encoder_funcs->disable)(encoder); |
else |
if (!drm_helper_encoder_in_use(encoder)) |
(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF); |
/* disconnector encoder from any connector */ |
encoder->crtc = NULL; |
} |
} |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; |
257,6 → 282,8 |
{ |
struct drm_display_mode *mode; |
ENTRY(); |
list_for_each_entry(mode, &connector->modes, head) { |
if (drm_mode_width(mode) > width || |
drm_mode_height(mode) > height) |
287,7 → 314,7 |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
enabled[i] = drm_connector_enabled(connector, true); |
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id, |
DRM_DEBUG("connector %d enabled ? %s\n", connector->base.id, |
enabled[i] ? "yes" : "no"); |
any_enabled |= enabled[i]; |
i++; |
317,7 → 344,7 |
continue; |
} |
DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", |
DRM_DEBUG("looking for preferred mode on connector %d\n", |
connector->base.id); |
modes[i] = drm_has_preferred_mode(connector, width, height); |
326,7 → 353,7 |
list_for_each_entry(modes[i], &connector->modes, head) |
break; |
} |
DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : |
DRM_DEBUG("found mode %s\n", modes[i] ? modes[i]->name : |
"none"); |
i++; |
} |
355,6 → 382,8 |
c++; |
} |
dbgprintf("n= %d\n", n); |
best_crtcs[n] = NULL; |
best_crtc = NULL; |
best_score = drm_pick_crtcs(dev, best_crtcs, modes, n+1, width, height); |
366,6 → 395,8 |
if (!crtcs) |
return best_score; |
dbgprintf("crtcs = %x\n", crtcs); |
my_score = 1; |
if (connector->status == connector_status_connected) |
my_score++; |
374,6 → 405,9 |
connector_funcs = connector->helper_private; |
encoder = connector_funcs->best_encoder(connector); |
dbgprintf("encoder = %x\n", encoder); |
if (!encoder) |
goto out; |
384,7 → 418,7 |
c = 0; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if ((encoder->possible_crtcs & (1 << c)) == 0) { |
if ((connector->encoder->possible_crtcs & (1 << c)) == 0) { |
c++; |
continue; |
} |
414,6 → 448,11 |
} |
out: |
kfree(crtcs); |
dbgprintf("best_score= %x\n", best_score); |
LEAVE(); |
return best_score; |
} |
427,7 → 466,7 |
int width, height; |
int i, ret; |
DRM_DEBUG_KMS("\n"); |
DRM_DEBUG("\n"); |
width = 1280; //dev->mode_config.max_width; |
height = 1024; //dev->mode_config.max_height; |
450,7 → 489,7 |
if (!ret) |
DRM_ERROR("Unable to find initial modes\n"); |
DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height); |
DRM_DEBUG("picking CRTCs for %dx%d config\n", width, height); |
drm_pick_crtcs(dev, crtcs, modes, 0, width, height); |
465,15 → 504,14 |
} |
if (mode && crtc) { |
DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", |
DRM_DEBUG("desired mode %s set on crtc %d\n", |
mode->name, crtc->base.id); |
crtc->desired_mode = mode; |
// crtc->mode = *mode; |
crtc->enabled = true; |
connector->encoder->crtc = crtc; |
} else { |
} else |
connector->encoder->crtc = NULL; |
connector->encoder = NULL; |
} |
i++; |
} |
480,6 → 518,8 |
kfree(crtcs); |
kfree(modes); |
kfree(enabled); |
LEAVE(); |
} |
/** |
563,6 → 603,8 |
struct drm_encoder *encoder; |
bool ret = true; |
ENTRY(); |
adjusted_mode = drm_mode_duplicate(dev, mode); |
crtc->enabled = drm_helper_crtc_in_use(crtc); |
654,7 → 696,7 |
crtc->x = saved_x; |
crtc->y = saved_y; |
} |
LEAVE(); |
return ret; |
} |
EXPORT_SYMBOL(drm_crtc_helper_set_mode); |
680,17 → 722,18 |
int drm_crtc_helper_set_config(struct drm_mode_set *set) |
{ |
struct drm_device *dev; |
struct drm_crtc *save_crtcs, *new_crtc, *crtc; |
struct drm_encoder *save_encoders, *new_encoder, *encoder; |
struct drm_crtc **save_crtcs, *new_crtc; |
struct drm_encoder **save_encoders, *new_encoder; |
struct drm_framebuffer *old_fb = NULL; |
bool mode_changed = false; /* if true do a full mode set */ |
bool fb_changed = false; /* if true and !mode_changed just do a flip */ |
struct drm_connector *save_connectors, *connector; |
bool save_enabled; |
bool mode_changed = false; |
bool fb_changed = false; |
struct drm_connector *connector; |
int count = 0, ro, fail = 0; |
struct drm_crtc_helper_funcs *crtc_funcs; |
int ret = 0; |
DRM_DEBUG_KMS("\n"); |
DRM_DEBUG("\n"); |
if (!set) |
return -EINVAL; |
703,63 → 746,38 |
crtc_funcs = set->crtc->helper_private; |
DRM_DEBUG_KMS("crtc: %p %d fb: %p connectors: %p num_connectors:" |
" %d (x, y) (%i, %i)\n", |
DRM_DEBUG("crtc: %p %d fb: %p connectors: %p num_connectors: %d (x, y) (%i, %i)\n", |
set->crtc, set->crtc->base.id, set->fb, set->connectors, |
(int)set->num_connectors, set->x, set->y); |
dev = set->crtc->dev; |
/* Allocate space for the backup of all (non-pointer) crtc, encoder and |
* connector data. */ |
save_crtcs = kzalloc(dev->mode_config.num_crtc * |
sizeof(struct drm_crtc), GFP_KERNEL); |
/* save previous config */ |
save_enabled = set->crtc->enabled; |
/* |
* We do mode_config.num_connectors here since we'll look at the |
* CRTC and encoder associated with each connector later. |
*/ |
save_crtcs = kzalloc(dev->mode_config.num_connector * |
sizeof(struct drm_crtc *), GFP_KERNEL); |
if (!save_crtcs) |
return -ENOMEM; |
save_encoders = kzalloc(dev->mode_config.num_encoder * |
sizeof(struct drm_encoder), GFP_KERNEL); |
save_encoders = kzalloc(dev->mode_config.num_connector * |
sizeof(struct drm_encoders *), GFP_KERNEL); |
if (!save_encoders) { |
kfree(save_crtcs); |
return -ENOMEM; |
} |
save_connectors = kzalloc(dev->mode_config.num_connector * |
sizeof(struct drm_connector), GFP_KERNEL); |
if (!save_connectors) { |
kfree(save_crtcs); |
kfree(save_encoders); |
return -ENOMEM; |
} |
/* Copy data. Note that driver private data is not affected. |
* Should anything bad happen only the expected state is |
* restored, not the drivers personal bookkeeping. |
*/ |
count = 0; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
save_crtcs[count++] = *crtc; |
} |
count = 0; |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
save_encoders[count++] = *encoder; |
} |
count = 0; |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
save_connectors[count++] = *connector; |
} |
/* We should be able to check here if the fb has the same properties |
* and then just flip_or_move it */ |
if (set->crtc->fb != set->fb) { |
/* If we have no fb then treat it as a full mode set */ |
if (set->crtc->fb == NULL) { |
DRM_DEBUG_KMS("crtc has no fb, full mode set\n"); |
DRM_DEBUG("crtc has no fb, full mode set\n"); |
mode_changed = true; |
} else if (set->fb == NULL) { |
mode_changed = true; |
} else if ((set->fb->bits_per_pixel != |
set->crtc->fb->bits_per_pixel) || |
set->fb->depth != set->crtc->fb->depth) |
772,7 → 790,7 |
fb_changed = true; |
if (set->mode && !drm_mode_equal(set->mode, &set->crtc->mode)) { |
DRM_DEBUG_KMS("modes are different, full mode set\n"); |
DRM_DEBUG("modes are different, full mode set\n"); |
drm_mode_debug_printmodeline(&set->crtc->mode); |
drm_mode_debug_printmodeline(set->mode); |
mode_changed = true; |
783,6 → 801,7 |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
struct drm_connector_helper_funcs *connector_funcs = |
connector->helper_private; |
save_encoders[count++] = connector->encoder; |
new_encoder = connector->encoder; |
for (ro = 0; ro < set->num_connectors; ro++) { |
if (set->connectors[ro] == connector) { |
797,13 → 816,8 |
} |
if (new_encoder != connector->encoder) { |
DRM_DEBUG_KMS("encoder changed, full mode switch\n"); |
DRM_DEBUG("encoder changed, full mode switch\n"); |
mode_changed = true; |
/* If the encoder is reused for another connector, then |
* the appropriate crtc will be set later. |
*/ |
if (connector->encoder) |
connector->encoder->crtc = NULL; |
connector->encoder = new_encoder; |
} |
} |
810,7 → 824,7 |
if (fail) { |
ret = -EINVAL; |
goto fail; |
goto fail_no_encoder; |
} |
count = 0; |
818,6 → 832,8 |
if (!connector->encoder) |
continue; |
save_crtcs[count++] = connector->encoder->crtc; |
if (connector->encoder->crtc == set->crtc) |
new_crtc = NULL; |
else |
832,14 → 848,14 |
if (new_crtc && |
!drm_encoder_crtc_ok(connector->encoder, new_crtc)) { |
ret = -EINVAL; |
goto fail; |
goto fail_set_mode; |
} |
if (new_crtc != connector->encoder->crtc) { |
DRM_DEBUG_KMS("crtc changed, full mode switch\n"); |
DRM_DEBUG("crtc changed, full mode switch\n"); |
mode_changed = true; |
connector->encoder->crtc = new_crtc; |
} |
DRM_DEBUG_KMS("setting connector %d crtc to %p\n", |
DRM_DEBUG("setting connector %d crtc to %p\n", |
connector->base.id, new_crtc); |
} |
852,8 → 868,7 |
set->crtc->fb = set->fb; |
set->crtc->enabled = (set->mode != NULL); |
if (set->mode != NULL) { |
DRM_DEBUG_KMS("attempting to set mode from" |
" userspace\n"); |
DRM_DEBUG("attempting to set mode from userspace\n"); |
drm_mode_debug_printmodeline(set->mode); |
if (!drm_crtc_helper_set_mode(set->crtc, set->mode, |
set->x, set->y, |
861,7 → 876,7 |
DRM_ERROR("failed to set mode on crtc %p\n", |
set->crtc); |
ret = -EINVAL; |
goto fail; |
goto fail_set_mode; |
} |
/* TODO are these needed? */ |
set->crtc->desired_x = set->x; |
870,9 → 885,6 |
} |
drm_helper_disable_unused_functions(dev); |
} else if (fb_changed) { |
set->crtc->x = set->x; |
set->crtc->y = set->y; |
old_fb = set->crtc->fb; |
if (set->crtc->fb != set->fb) |
set->crtc->fb = set->fb; |
879,34 → 891,30 |
ret = crtc_funcs->mode_set_base(set->crtc, |
set->x, set->y, old_fb); |
if (ret != 0) |
goto fail; |
goto fail_set_mode; |
} |
kfree(save_connectors); |
kfree(save_encoders); |
kfree(save_crtcs); |
return 0; |
fail: |
/* Restore all previous data. */ |
fail_set_mode: |
set->crtc->enabled = save_enabled; |
set->crtc->fb = old_fb; |
count = 0; |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
*crtc = save_crtcs[count++]; |
} |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
if (!connector->encoder) |
continue; |
count = 0; |
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
*encoder = save_encoders[count++]; |
connector->encoder->crtc = save_crtcs[count++]; |
} |
fail_no_encoder: |
kfree(save_crtcs); |
count = 0; |
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { |
*connector = save_connectors[count++]; |
connector->encoder = save_encoders[count++]; |
} |
kfree(save_connectors); |
kfree(save_encoders); |
kfree(save_crtcs); |
return ret; |
} |
EXPORT_SYMBOL(drm_crtc_helper_set_config); |
913,7 → 921,7 |
bool drm_helper_plugged_event(struct drm_device *dev) |
{ |
DRM_DEBUG_KMS("\n"); |
DRM_DEBUG("\n"); |
drm_helper_probe_connector_modes(dev, dev->mode_config.max_width, |
dev->mode_config.max_height); |
942,22 → 950,36 |
*/ |
bool drm_helper_initial_config(struct drm_device *dev) |
{ |
struct drm_connector *connector; |
int count = 0; |
ENTRY(); |
count = drm_helper_probe_connector_modes(dev, |
dev->mode_config.max_width, |
dev->mode_config.max_height); |
/* |
* we shouldn't end up with no modes here. |
* None of the available connectors had any modes, so add some |
* and try to light them up anyway |
*/ |
// WARN(!count, "Connected connector with 0 modes\n"); |
if (!count) { |
DRM_ERROR("connectors have no modes, using standard modes\n"); |
list_for_each_entry(connector, |
&dev->mode_config.connector_list, |
head) |
drm_helper_add_std_modes(dev, connector); |
} |
drm_setup_crtcs(dev); |
/* alert the driver fb layer */ |
radeonfb_create(dev->dev_private, 1280, 1024, 1280, 1024, NULL); |
// /* alert the driver fb layer */ |
dev->mode_config.funcs->fb_changed(dev); |
LEAVE(); |
return 0; |
} |
EXPORT_SYMBOL(drm_helper_initial_config); |
1078,13 → 1100,15 |
} |
EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct); |
void sysSetScreen(int width, int height, int pitch) |
void sysSetScreen(int width, int height) |
{ |
asm __volatile__ |
( |
"decl %%eax \n\t" |
"dec %%edx \n\t" |
"call *__imp__SetScreen" |
: |
:"a" (width-1),"d"(height-1), "c"(pitch) |
:"a" (width),"d"(height) |
:"memory","cc" |
); |
} |
1093,21 → 1117,33 |
int drm_helper_resume_force_mode(struct drm_device *dev) |
{ |
struct drm_crtc *crtc; |
struct drm_framebuffer *fb; |
int ret; |
ENTRY(); |
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
if (!crtc->enabled) |
continue; |
dbgprintf("mode %x x %x y %x fb %x\n", |
crtc->x, crtc->y, crtc->fb, crtc->mode); |
ret = drm_crtc_helper_set_mode(crtc, &crtc->mode, |
fb = list_first_entry(&dev->mode_config.fb_kernel_list, struct drm_framebuffer, filp_head); |
crtc->fb = fb; |
ret = drm_crtc_helper_set_mode(crtc, crtc->desired_mode, |
crtc->x, crtc->y, crtc->fb); |
if (ret == false) |
DRM_ERROR("failed to set mode on crtc %p\n", crtc); |
sysSetScreen(1280,1024); |
} |
/* disable the unused connectors while restoring the modesetting */ |
drm_helper_disable_unused_functions(dev); |
LEAVE(); |
return 0; |
} |
EXPORT_SYMBOL(drm_helper_resume_force_mode); |
/drivers/video/drm/drm_mm.c |
---|
44,7 → 44,6 |
#include "drmP.h" |
#include "drm_mm.h" |
//#include <linux/slab.h> |
#include <linux/seq_file.h> |
#define MM_UNUSED_TARGET 4 |
/drivers/video/drm/idr.c |
---|
27,10 → 27,6 |
*/ |
#include <linux/idr.h> |
#include <stdlib.h> |
#include "drm.h" |
#include "drmP.h" |
#include "drm_crtc.h" |
#define ADDR "=m" (*(volatile long *) addr) |
422,6 → 418,7 |
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id) |
{ |
int rv; |
rv = idr_get_new_above_int(idp, ptr, starting_id); |
/* |
* This is a cheap hack until the IDR code can be fixed to |
428,10 → 425,7 |
* return proper error values. |
*/ |
if (rv < 0) |
{ |
dbgprintf("fail\n"); |
return _idr_rc_to_errno(rv); |
}; |
*id = rv; |
return 0; |
} |
/drivers/video/drm/drm_edid.c |
---|
30,11 → 30,15 |
#include <types.h> |
#include <list.h> |
#include <linux/idr.h> |
#include <linux/i2c.h> |
#include <linux/i2c-algo-bit.h> |
#include "drmP.h" |
#include "drm_edid.h" |
/* |
* TODO: |
* - support EDID 1.4 (incl. CE blocks) |
63,13 → 67,7 |
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5) |
/* use +hsync +vsync for detailed mode */ |
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6) |
/* define the number of Extension EDID block */ |
#define MAX_EDID_EXT_NUM 4 |
#define LEVEL_DMT 0 |
#define LEVEL_GTF 1 |
#define LEVEL_CVT 2 |
static struct edid_quirk { |
char *vendor; |
int product_id; |
246,263 → 244,6 |
preferred_mode->type |= DRM_MODE_TYPE_PREFERRED; |
} |
/* |
* Add the Autogenerated from the DMT spec. |
* This table is copied from xfree86/modes/xf86EdidModes.c. |
* But the mode with Reduced blank feature is deleted. |
*/ |
static struct drm_display_mode drm_dmt_modes[] = { |
/* 640x350@85Hz */ |
{ DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, |
736, 832, 0, 350, 382, 385, 445, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 640x400@85Hz */ |
{ DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 31500, 640, 672, |
736, 832, 0, 400, 401, 404, 445, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 720x400@85Hz */ |
{ DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 35500, 720, 756, |
828, 936, 0, 400, 401, 404, 446, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 640x480@60Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656, |
752, 800, 0, 480, 489, 492, 525, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 640x480@72Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 664, |
704, 832, 0, 480, 489, 492, 520, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 640x480@75Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 31500, 640, 656, |
720, 840, 0, 480, 481, 484, 500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 640x480@85Hz */ |
{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 36000, 640, 696, |
752, 832, 0, 480, 481, 484, 509, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 800x600@56Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 36000, 800, 824, |
896, 1024, 0, 600, 601, 603, 625, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@60Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840, |
968, 1056, 0, 600, 601, 605, 628, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@72Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 50000, 800, 856, |
976, 1040, 0, 600, 637, 643, 666, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@75Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 49500, 800, 816, |
896, 1056, 0, 600, 601, 604, 625, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 800x600@85Hz */ |
{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 56250, 800, 832, |
896, 1048, 0, 600, 601, 604, 631, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 848x480@60Hz */ |
{ DRM_MODE("848x480", DRM_MODE_TYPE_DRIVER, 33750, 848, 864, |
976, 1088, 0, 480, 486, 494, 517, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1024x768@43Hz, interlace */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 44900, 1024, 1032, |
1208, 1264, 0, 768, 768, 772, 817, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC | |
DRM_MODE_FLAG_INTERLACE) }, |
/* 1024x768@60Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048, |
1184, 1344, 0, 768, 771, 777, 806, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1024x768@70Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 75000, 1024, 1048, |
1184, 1328, 0, 768, 771, 777, 806, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1024x768@75Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 78750, 1024, 1040, |
1136, 1312, 0, 768, 769, 772, 800, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1024x768@85Hz */ |
{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 94500, 1024, 1072, |
1072, 1376, 0, 768, 769, 772, 808, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1152x864@75Hz */ |
{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216, |
1344, 1600, 0, 864, 865, 868, 900, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x768@60Hz */ |
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344, |
1472, 1664, 0, 768, 771, 778, 798, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x768@75Hz */ |
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 102250, 1280, 1360, |
1488, 1696, 0, 768, 771, 778, 805, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x768@85Hz */ |
{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 117500, 1280, 1360, |
1496, 1712, 0, 768, 771, 778, 809, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x800@60Hz */ |
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352, |
1480, 1680, 0, 800, 803, 809, 831, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) }, |
/* 1280x800@75Hz */ |
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 106500, 1280, 1360, |
1488, 1696, 0, 800, 803, 809, 838, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x800@85Hz */ |
{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 122500, 1280, 1360, |
1496, 1712, 0, 800, 803, 809, 843, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x960@60Hz */ |
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376, |
1488, 1800, 0, 960, 961, 964, 1000, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x960@85Hz */ |
{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1344, |
1504, 1728, 0, 960, 961, 964, 1011, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x1024@60Hz */ |
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328, |
1440, 1688, 0, 1024, 1025, 1028, 1066, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x1024@75Hz */ |
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 135000, 1280, 1296, |
1440, 1688, 0, 1024, 1025, 1028, 1066, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1280x1024@85Hz */ |
{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 157500, 1280, 1344, |
1504, 1728, 0, 1024, 1025, 1028, 1072, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1360x768@60Hz */ |
{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424, |
1536, 1792, 0, 768, 771, 777, 795, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1440x1050@60Hz */ |
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488, |
1632, 1864, 0, 1050, 1053, 1057, 1089, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1440x1050@75Hz */ |
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 156000, 1400, 1504, |
1648, 1896, 0, 1050, 1053, 1057, 1099, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1400x1050@85Hz */
{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 179500, 1400, 1504, |
1656, 1912, 0, 1050, 1053, 1057, 1105, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1440x900@60Hz */ |
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520, |
1672, 1904, 0, 900, 903, 909, 934, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1440x900@75Hz */ |
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 136750, 1440, 1536, |
1688, 1936, 0, 900, 903, 909, 942, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1440x900@85Hz */ |
{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 157000, 1440, 1544, |
1696, 1952, 0, 900, 903, 909, 948, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@60Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@65Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 175500, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@70Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 189000, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@75Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 2025000, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1600x1200@85Hz */ |
{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 229500, 1600, 1664, |
1856, 2160, 0, 1200, 1201, 1204, 1250, 0, |
DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1680x1050@60Hz */ |
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784, |
1960, 2240, 0, 1050, 1053, 1059, 1089, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1680x1050@75Hz */ |
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 187000, 1680, 1800, |
1976, 2272, 0, 1050, 1053, 1059, 1099, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1680x1050@85Hz */ |
{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 214750, 1680, 1808, |
1984, 2288, 0, 1050, 1053, 1059, 1105, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1792x1344@60Hz */ |
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920, |
2120, 2448, 0, 1344, 1345, 1348, 1394, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1792x1344@75Hz */
{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 261000, 1792, 1888, |
2104, 2456, 0, 1344, 1345, 1348, 1417, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1856x1392@60Hz */
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952, |
2176, 2528, 0, 1392, 1393, 1396, 1439, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1856x1392@75Hz */ |
{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 288000, 1856, 1984, |
2208, 2560, 0, 1392, 1395, 1399, 1500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1200@60Hz */ |
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056, |
2256, 2592, 0, 1200, 1203, 1209, 1245, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1200@75Hz */ |
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 245250, 1920, 2056, |
2264, 2608, 0, 1200, 1203, 1209, 1255, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1200@85Hz */ |
{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 281250, 1920, 2064, |
2272, 2624, 0, 1200, 1203, 1209, 1262, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1440@60Hz */ |
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048, |
2256, 2600, 0, 1440, 1441, 1444, 1500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 1920x1440@75Hz */ |
{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2064, |
2288, 2640, 0, 1440, 1441, 1444, 1500, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 2560x1600@60Hz */ |
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752, |
3032, 3504, 0, 1600, 1603, 1609, 1658, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 2560x1600@75HZ */ |
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 443250, 2560, 2768, |
3048, 3536, 0, 1600, 1603, 1609, 1672, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
/* 2560x1600@85HZ */ |
{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 505250, 2560, 2768, |
3048, 3536, 0, 1600, 1603, 1609, 1682, 0, |
DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) }, |
}; |
static struct drm_display_mode *drm_find_dmt(struct drm_device *dev, |
int hsize, int vsize, int fresh) |
{ |
int i, count; |
struct drm_display_mode *ptr, *mode; |
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); |
mode = NULL; |
for (i = 0; i < count; i++) { |
ptr = &drm_dmt_modes[i]; |
if (hsize == ptr->hdisplay && |
vsize == ptr->vdisplay && |
fresh == drm_mode_vrefresh(ptr)) { |
/* get the expected default mode */ |
mode = drm_mode_duplicate(dev, ptr); |
break; |
} |
} |
return mode; |
} |
/** |
* drm_mode_std - convert standard mode info (width, height, refresh) into mode |
* @t: standard timing params |
514,22 → 255,16 |
* generation code. |
*/ |
struct drm_display_mode *drm_mode_std(struct drm_device *dev, |
struct std_timing *t, |
int timing_level) |
struct std_timing *t) |
{ |
struct drm_display_mode *mode; |
int hsize, vsize; |
int vrefresh_rate; |
int hsize = t->hsize * 8 + 248, vsize; |
unsigned aspect_ratio = (t->vfreq_aspect & EDID_TIMING_ASPECT_MASK) |
>> EDID_TIMING_ASPECT_SHIFT; |
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) |
>> EDID_TIMING_VFREQ_SHIFT; |
/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ |
hsize = t->hsize * 8 + 248; |
/* vrefresh_rate = vfreq + 60 */ |
vrefresh_rate = vfreq + 60; |
/* the vdisplay is calculated based on the aspect ratio */ |
mode = drm_mode_create(dev); |
if (!mode) |
return NULL; |
if (aspect_ratio == 0) |
vsize = (hsize * 10) / 16; |
539,30 → 274,9 |
vsize = (hsize * 4) / 5; |
else |
vsize = (hsize * 9) / 16; |
/* HDTV hack */ |
if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { |
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); |
mode->hdisplay = 1366; |
mode->vsync_start = mode->vsync_start - 1; |
mode->vsync_end = mode->vsync_end - 1; |
return mode; |
} |
mode = NULL; |
/* check whether it can be found in default mode table */ |
mode = drm_find_dmt(dev, hsize, vsize, vrefresh_rate); |
if (mode) |
return mode; |
switch (timing_level) { |
case LEVEL_DMT: |
break; |
case LEVEL_GTF: |
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); |
break; |
case LEVEL_CVT: |
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); |
break; |
} |
drm_mode_set_name(mode); |
return mode; |
} |
744,19 → 458,6 |
return modes; |
} |
/** |
* standard_timing_level - get std. timing level (CVT/GTF/DMT)
* @edid: EDID block to scan |
*/ |
static int standard_timing_level(struct edid *edid) |
{ |
if (edid->revision >= 2) { |
if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)) |
return LEVEL_CVT; |
return LEVEL_GTF; |
} |
return LEVEL_DMT; |
} |
/** |
* add_standard_modes - get std. modes from EDID and add them |
769,10 → 470,7 |
{ |
struct drm_device *dev = connector->dev; |
int i, modes = 0; |
int timing_level; |
timing_level = standard_timing_level(edid); |
for (i = 0; i < EDID_STD_TIMINGS; i++) { |
struct std_timing *t = &edid->standard_timings[i]; |
struct drm_display_mode *newmode; |
781,8 → 479,7 |
if (t->hsize == 1 && t->vfreq_aspect == 1) |
continue; |
newmode = drm_mode_std(dev, &edid->standard_timings[i], |
timing_level); |
newmode = drm_mode_std(dev, &edid->standard_timings[i]); |
if (newmode) { |
drm_mode_probed_add(connector, newmode); |
modes++; |
806,50 → 503,18 |
{ |
struct drm_device *dev = connector->dev; |
int i, j, modes = 0; |
int timing_level; |
timing_level = standard_timing_level(edid); |
for (i = 0; i < EDID_DETAILED_TIMINGS; i++) { |
struct detailed_timing *timing = &edid->detailed_timings[i]; |
struct detailed_non_pixel *data = &timing->data.other_data; |
struct drm_display_mode *newmode; |
/* X server check is version 1.1 or higher */ |
if (edid->version == 1 && edid->revision >= 1 && |
!timing->pixel_clock) { |
/* Other timing or info */ |
switch (data->type) { |
case EDID_DETAIL_MONITOR_SERIAL: |
break; |
case EDID_DETAIL_MONITOR_STRING: |
break; |
case EDID_DETAIL_MONITOR_RANGE: |
/* Get monitor range data */ |
break; |
case EDID_DETAIL_MONITOR_NAME: |
break; |
case EDID_DETAIL_MONITOR_CPDATA: |
break; |
case EDID_DETAIL_STD_MODES: |
/* Five modes per detailed section */ |
for (j = 0; j < 5; i++) { |
struct std_timing *std; |
struct drm_display_mode *newmode; |
/* EDID up to and including 1.2 may put monitor info here */ |
if (edid->version == 1 && edid->revision < 3) |
continue; |
std = &data->data.timings[j]; |
newmode = drm_mode_std(dev, std, |
timing_level); |
if (newmode) { |
drm_mode_probed_add(connector, newmode); |
modes++; |
} |
} |
break; |
default: |
break; |
} |
} else { |
/* Detailed mode timing */ |
if (timing->pixel_clock) { |
newmode = drm_mode_detailed(dev, edid, timing, quirks); |
if (!newmode) |
continue; |
860,91 → 525,7 |
drm_mode_probed_add(connector, newmode); |
modes++; |
} |
} |
return modes; |
} |
/** |
* add_detailed_info_eedid - get detailed mode info from additional timing
* EDID block
* @connector: attached connector
* @edid: EDID block to scan (it is only to get additional timing EDID block)
* @quirks: quirks to apply |
* |
* Some of the detailed timing sections may contain mode information. Grab |
* it and add it to the list. |
*/ |
static int add_detailed_info_eedid(struct drm_connector *connector, |
struct edid *edid, u32 quirks) |
{ |
struct drm_device *dev = connector->dev; |
int i, j, modes = 0; |
char *edid_ext = NULL; |
struct detailed_timing *timing; |
struct detailed_non_pixel *data; |
struct drm_display_mode *newmode; |
int edid_ext_num; |
int start_offset, end_offset; |
int timing_level; |
if (edid->version == 1 && edid->revision < 3) { |
/* If the EDID version is less than 1.3, there is no |
* extension EDID. |
*/ |
return 0; |
} |
if (!edid->extensions) { |
/* if there is no extension EDID, it is unnecessary to |
* parse the E-EDID to get detailed info |
*/ |
return 0; |
} |
/* Chose real EDID extension number */ |
edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ? |
MAX_EDID_EXT_NUM : edid->extensions; |
/* Find CEA extension */ |
for (i = 0; i < edid_ext_num; i++) { |
edid_ext = (char *)edid + EDID_LENGTH * (i + 1); |
/* This block is CEA extension */ |
if (edid_ext[0] == 0x02) |
break; |
} |
if (i == edid_ext_num) { |
/* if there is no additional timing EDID block, return */ |
return 0; |
} |
/* Get the start offset of detailed timing block */ |
start_offset = edid_ext[2]; |
if (start_offset == 0) { |
/* If the start_offset is zero, it means that neither detailed |
* info nor data block exist. In such case it is also |
* unnecessary to parse the detailed timing info. |
*/ |
return 0; |
} |
timing_level = standard_timing_level(edid); |
end_offset = EDID_LENGTH; |
end_offset -= sizeof(struct detailed_timing); |
for (i = start_offset; i < end_offset; |
i += sizeof(struct detailed_timing)) { |
timing = (struct detailed_timing *)(edid_ext + i); |
data = &timing->data.other_data; |
/* Detailed mode timing */ |
if (timing->pixel_clock) { |
newmode = drm_mode_detailed(dev, edid, timing, quirks); |
if (!newmode) |
continue; |
drm_mode_probed_add(connector, newmode); |
modes++; |
continue; |
} |
/* Other timing or info */ |
967,7 → 548,7 |
struct drm_display_mode *newmode; |
std = &data->data.timings[j]; |
newmode = drm_mode_std(dev, std, timing_level); |
newmode = drm_mode_std(dev, std); |
if (newmode) { |
drm_mode_probed_add(connector, newmode); |
modes++; |
1027,6 → 608,8 |
ret = drm_do_probe_ddc_edid(adapter, buf, len); |
if (ret != 0) { |
// dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n", |
// drm_get_connector_name(connector)); |
goto end; |
} |
if (!edid_is_valid((struct edid *)buf)) { |
1038,6 → 621,7 |
return ret; |
} |
#define MAX_EDID_EXT_NUM 4 |
/** |
* drm_get_edid - get EDID data, if available |
* @connector: connector we're probing |
1190,7 → 774,6 |
num_modes += add_established_modes(connector, edid); |
num_modes += add_standard_modes(connector, edid); |
num_modes += add_detailed_info(connector, edid, quirks); |
num_modes += add_detailed_info_eedid(connector, edid, quirks); |
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75)) |
edid_fixup_preferred(connector, quirks); |
1216,49 → 799,3 |
return num_modes; |
} |
EXPORT_SYMBOL(drm_add_edid_modes); |
/** |
* drm_add_modes_noedid - add modes for the connectors without EDID |
* @connector: connector we're probing |
* @hdisplay: the horizontal display limit |
* @vdisplay: the vertical display limit |
* |
* Add the specified modes to the connector's mode list. Only when the |
* hdisplay/vdisplay is not beyond the given limit, it will be added. |
* |
* Return number of modes added or 0 if we couldn't find any. |
*/ |
int drm_add_modes_noedid(struct drm_connector *connector, |
int hdisplay, int vdisplay) |
{ |
int i, count, num_modes = 0; |
struct drm_display_mode *mode, *ptr; |
struct drm_device *dev = connector->dev; |
count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode); |
if (hdisplay < 0) |
hdisplay = 0; |
if (vdisplay < 0) |
vdisplay = 0; |
for (i = 0; i < count; i++) { |
ptr = &drm_dmt_modes[i]; |
if (hdisplay && vdisplay) { |
/* |
* Only when two are valid, they will be used to check |
* whether the mode should be added to the mode list of |
* the connector. |
*/ |
if (ptr->hdisplay > hdisplay || |
ptr->vdisplay > vdisplay) |
continue; |
} |
mode = drm_mode_duplicate(dev, ptr); |
if (mode) { |
drm_mode_probed_add(connector, mode); |
num_modes++; |
} |
} |
return num_modes; |
} |
EXPORT_SYMBOL(drm_add_modes_noedid); |
/drivers/video/drm/drm_modes.c |
---|
8,8 → 8,6 |
* Copyright © 2007 Dave Airlie |
* Copyright © 2007-2008 Intel Corporation |
* Jesse Barnes <jesse.barnes@intel.com> |
* Copyright 2005-2006 Luc Verhaegen |
* Copyright (c) 2001, Andy Ritger aritger@nvidia.com |
* |
* Permission is hereby granted, free of charge, to any person obtaining a |
* copy of this software and associated documentation files (the "Software"), |
35,11 → 33,12 |
* authorization from the copyright holder(s) and author(s). |
*/ |
#include <linux/list.h> |
#include <list.h> |
#include "drmP.h" |
#include "drm.h" |
#include "drm_crtc.h" |
#define DRM_MODESET_DEBUG "drm_mode" |
/** |
* drm_mode_debug_printmodeline - debug print a mode |
* @dev: DRM device |
52,8 → 51,8 |
*/ |
void drm_mode_debug_printmodeline(struct drm_display_mode *mode) |
{ |
DRM_DEBUG_KMS("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d " |
"0x%x 0x%x\n", |
DRM_DEBUG_MODE(DRM_MODESET_DEBUG, |
"Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n", |
mode->base.id, mode->name, mode->vrefresh, mode->clock, |
mode->hdisplay, mode->hsync_start, |
mode->hsync_end, mode->htotal, |
63,420 → 62,6 |
EXPORT_SYMBOL(drm_mode_debug_printmodeline); |
/** |
* drm_cvt_mode -create a modeline based on CVT algorithm |
* @dev: DRM device |
* @hdisplay: hdisplay size |
* @vdisplay: vdisplay size |
* @vrefresh : vrefresh rate |
* @reduced : Whether the GTF calculation is simplified |
* @interlaced:Whether the interlace is supported |
* |
* LOCKING: |
* none. |
* |
* return the modeline based on CVT algorithm |
* |
* This function is called to generate the modeline based on CVT algorithm |
* according to the hdisplay, vdisplay, vrefresh. |
* It is based from the VESA(TM) Coordinated Video Timing Generator by |
* Graham Loveridge April 9, 2003 available at |
* http://www.vesa.org/public/CVT/CVTd6r1.xls |
* |
* And it is copied from xf86CVTmode in xserver/hw/xfree86/modes/xf86cvt.c. |
* What I have done is to translate it by using integer calculation. |
*/ |
#define HV_FACTOR 1000 |
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, |
int vdisplay, int vrefresh, |
bool reduced, bool interlaced) |
{ |
/* 1) top/bottom margin size (% of height) - default: 1.8, */ |
#define CVT_MARGIN_PERCENTAGE 18 |
/* 2) character cell horizontal granularity (pixels) - default 8 */ |
#define CVT_H_GRANULARITY 8 |
/* 3) Minimum vertical porch (lines) - default 3 */ |
#define CVT_MIN_V_PORCH 3 |
/* 4) Minimum number of vertical back porch lines - default 6 */ |
#define CVT_MIN_V_BPORCH 6 |
/* Pixel Clock step (kHz) */ |
#define CVT_CLOCK_STEP 250 |
struct drm_display_mode *drm_mode; |
bool margins = false; |
unsigned int vfieldrate, hperiod; |
int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync; |
int interlace; |
/* allocate the drm_display_mode structure. If failure, we will |
* return directly |
*/ |
drm_mode = drm_mode_create(dev); |
if (!drm_mode) |
return NULL; |
/* the CVT default refresh rate is 60Hz */ |
if (!vrefresh) |
vrefresh = 60; |
/* the required field fresh rate */ |
if (interlaced) |
vfieldrate = vrefresh * 2; |
else |
vfieldrate = vrefresh; |
/* horizontal pixels */ |
hdisplay_rnd = hdisplay - (hdisplay % CVT_H_GRANULARITY); |
/* determine the left&right borders */ |
hmargin = 0; |
if (margins) { |
hmargin = hdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000; |
hmargin -= hmargin % CVT_H_GRANULARITY; |
} |
/* find the total active pixels */ |
drm_mode->hdisplay = hdisplay_rnd + 2 * hmargin; |
/* find the number of lines per field */ |
if (interlaced) |
vdisplay_rnd = vdisplay / 2; |
else |
vdisplay_rnd = vdisplay; |
/* find the top & bottom borders */ |
vmargin = 0; |
if (margins) |
vmargin = vdisplay_rnd * CVT_MARGIN_PERCENTAGE / 1000; |
drm_mode->vdisplay = vdisplay + 2 * vmargin; |
/* Interlaced */ |
if (interlaced) |
interlace = 1; |
else |
interlace = 0; |
/* Determine VSync Width from aspect ratio */ |
if (!(vdisplay % 3) && ((vdisplay * 4 / 3) == hdisplay)) |
vsync = 4; |
else if (!(vdisplay % 9) && ((vdisplay * 16 / 9) == hdisplay)) |
vsync = 5; |
else if (!(vdisplay % 10) && ((vdisplay * 16 / 10) == hdisplay)) |
vsync = 6; |
else if (!(vdisplay % 4) && ((vdisplay * 5 / 4) == hdisplay)) |
vsync = 7; |
else if (!(vdisplay % 9) && ((vdisplay * 15 / 9) == hdisplay)) |
vsync = 7; |
else /* custom */ |
vsync = 10; |
if (!reduced) { |
/* simplify the GTF calculation */ |
/* 4) Minimum time of vertical sync + back porch interval (µs) |
* default 550.0 |
*/ |
int tmp1, tmp2; |
#define CVT_MIN_VSYNC_BP 550 |
/* 3) Nominal HSync width (% of line period) - default 8 */ |
#define CVT_HSYNC_PERCENTAGE 8 |
unsigned int hblank_percentage; |
int vsyncandback_porch, vback_porch, hblank; |
/* estimated the horizontal period */ |
tmp1 = HV_FACTOR * 1000000 - |
CVT_MIN_VSYNC_BP * HV_FACTOR * vfieldrate; |
tmp2 = (vdisplay_rnd + 2 * vmargin + CVT_MIN_V_PORCH) * 2 + |
interlace; |
hperiod = tmp1 * 2 / (tmp2 * vfieldrate); |
tmp1 = CVT_MIN_VSYNC_BP * HV_FACTOR / hperiod + 1; |
/* 9. Find number of lines in sync + backporch */ |
if (tmp1 < (vsync + CVT_MIN_V_PORCH)) |
vsyncandback_porch = vsync + CVT_MIN_V_PORCH; |
else |
vsyncandback_porch = tmp1; |
/* 10. Find number of lines in back porch */ |
vback_porch = vsyncandback_porch - vsync; |
drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + |
vsyncandback_porch + CVT_MIN_V_PORCH; |
/* 5) Definition of Horizontal blanking time limitation */ |
/* Gradient (%/kHz) - default 600 */ |
#define CVT_M_FACTOR 600 |
/* Offset (%) - default 40 */ |
#define CVT_C_FACTOR 40 |
/* Blanking time scaling factor - default 128 */ |
#define CVT_K_FACTOR 128 |
/* Scaling factor weighting - default 20 */ |
#define CVT_J_FACTOR 20 |
#define CVT_M_PRIME (CVT_M_FACTOR * CVT_K_FACTOR / 256) |
#define CVT_C_PRIME ((CVT_C_FACTOR - CVT_J_FACTOR) * CVT_K_FACTOR / 256 + \ |
CVT_J_FACTOR) |
/* 12. Find ideal blanking duty cycle from formula */ |
hblank_percentage = CVT_C_PRIME * HV_FACTOR - CVT_M_PRIME * |
hperiod / 1000; |
/* 13. Blanking time */ |
if (hblank_percentage < 20 * HV_FACTOR) |
hblank_percentage = 20 * HV_FACTOR; |
hblank = drm_mode->hdisplay * hblank_percentage / |
(100 * HV_FACTOR - hblank_percentage); |
hblank -= hblank % (2 * CVT_H_GRANULARITY); |
/* 14. find the total pixes per line */ |
drm_mode->htotal = drm_mode->hdisplay + hblank; |
drm_mode->hsync_end = drm_mode->hdisplay + hblank / 2; |
drm_mode->hsync_start = drm_mode->hsync_end - |
(drm_mode->htotal * CVT_HSYNC_PERCENTAGE) / 100; |
drm_mode->hsync_start += CVT_H_GRANULARITY - |
drm_mode->hsync_start % CVT_H_GRANULARITY; |
/* fill the Vsync values */ |
drm_mode->vsync_start = drm_mode->vdisplay + CVT_MIN_V_PORCH; |
drm_mode->vsync_end = drm_mode->vsync_start + vsync; |
} else { |
/* Reduced blanking */ |
/* Minimum vertical blanking interval time (µs)- default 460 */ |
#define CVT_RB_MIN_VBLANK 460 |
/* Fixed number of clocks for horizontal sync */ |
#define CVT_RB_H_SYNC 32 |
/* Fixed number of clocks for horizontal blanking */ |
#define CVT_RB_H_BLANK 160 |
/* Fixed number of lines for vertical front porch - default 3*/ |
#define CVT_RB_VFPORCH 3 |
int vbilines; |
int tmp1, tmp2; |
/* 8. Estimate Horizontal period. */ |
tmp1 = HV_FACTOR * 1000000 - |
CVT_RB_MIN_VBLANK * HV_FACTOR * vfieldrate; |
tmp2 = vdisplay_rnd + 2 * vmargin; |
hperiod = tmp1 / (tmp2 * vfieldrate); |
/* 9. Find number of lines in vertical blanking */ |
vbilines = CVT_RB_MIN_VBLANK * HV_FACTOR / hperiod + 1; |
/* 10. Check if vertical blanking is sufficient */ |
if (vbilines < (CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH)) |
vbilines = CVT_RB_VFPORCH + vsync + CVT_MIN_V_BPORCH; |
/* 11. Find total number of lines in vertical field */ |
drm_mode->vtotal = vdisplay_rnd + 2 * vmargin + vbilines; |
/* 12. Find total number of pixels in a line */ |
drm_mode->htotal = drm_mode->hdisplay + CVT_RB_H_BLANK; |
/* Fill in HSync values */ |
drm_mode->hsync_end = drm_mode->hdisplay + CVT_RB_H_BLANK / 2; |
drm_mode->hsync_start = drm_mode->hsync_end = CVT_RB_H_SYNC; |
} |
/* 15/13. Find pixel clock frequency (kHz for xf86) */ |
drm_mode->clock = drm_mode->htotal * HV_FACTOR * 1000 / hperiod; |
drm_mode->clock -= drm_mode->clock % CVT_CLOCK_STEP; |
/* 18/16. Find actual vertical frame frequency */ |
/* ignore - just set the mode flag for interlaced */ |
if (interlaced) |
drm_mode->vtotal *= 2; |
/* Fill the mode line name */ |
drm_mode_set_name(drm_mode); |
if (reduced) |
drm_mode->flags |= (DRM_MODE_FLAG_PHSYNC | |
DRM_MODE_FLAG_NVSYNC); |
else |
drm_mode->flags |= (DRM_MODE_FLAG_PVSYNC | |
DRM_MODE_FLAG_NHSYNC); |
if (interlaced) |
drm_mode->flags |= DRM_MODE_FLAG_INTERLACE; |
return drm_mode; |
} |
EXPORT_SYMBOL(drm_cvt_mode); |
/** |
* drm_gtf_mode - create the modeline based on GTF algorithm |
* |
* @dev :drm device |
* @hdisplay :hdisplay size |
* @vdisplay :vdisplay size |
* @vrefresh :vrefresh rate. |
* @interlaced :whether the interlace is supported |
* @margins :whether the margin is supported |
* |
* LOCKING. |
* none. |
* |
* return the modeline based on GTF algorithm |
* |
* This function is to create the modeline based on the GTF algorithm. |
* Generalized Timing Formula is derived from: |
* GTF Spreadsheet by Andy Morrish (1/5/97) |
* available at http://www.vesa.org |
* |
* And it is copied from the file of xserver/hw/xfree86/modes/xf86gtf.c. |
* What I have done is to translate it by using integer calculation. |
* I also refer to the function of fb_get_mode in the file of |
* drivers/video/fbmon.c |
*/ |
struct drm_display_mode *drm_gtf_mode(struct drm_device *dev, int hdisplay,
				      int vdisplay, int vrefresh,
				      bool interlaced, int margins)
{
	/* 1) top/bottom margin size (% of height) - default: 1.8, */
#define	GTF_MARGIN_PERCENTAGE		18
	/* 2) character cell horizontal granularity (pixels) - default 8 */
#define	GTF_CELL_GRAN			8
	/* 3) Minimum vertical porch (lines) - default 3 */
#define	GTF_MIN_V_PORCH			1
	/* width of vsync in lines */
#define V_SYNC_RQD			3
	/* width of hsync as % of total line */
#define H_SYNC_PERCENT			8
	/* min time of vsync + back porch (microsec) */
#define MIN_VSYNC_PLUS_BP		550
	/* blanking formula gradient */
#define GTF_M				600
	/* blanking formula offset */
#define GTF_C				40
	/* blanking formula scaling factor */
#define GTF_K				128
	/* blanking formula scaling factor */
#define GTF_J				20
	/* C' and M' are part of the Blanking Duty Cycle computation */
#define GTF_C_PRIME	(((GTF_C - GTF_J) * GTF_K / 256) + GTF_J)
#define GTF_M_PRIME	(GTF_K * GTF_M / 256)
	struct drm_display_mode *drm_mode;
	unsigned int hdisplay_rnd, vdisplay_rnd, vfieldrate_rqd;
	int top_margin, bottom_margin;
	int interlace;
	unsigned int hfreq_est;
	int vsync_plus_bp, vback_porch;
	unsigned int vtotal_lines, vfieldrate_est, hperiod;
	unsigned int vfield_rate, vframe_rate;
	int left_margin, right_margin;
	unsigned int total_active_pixels, ideal_duty_cycle;
	unsigned int hblank, total_pixels, pixel_freq;
	int hsync, hfront_porch, vodd_front_porch_lines;
	unsigned int tmp1, tmp2;

	/* Allocate the result up front; the caller owns the returned mode. */
	drm_mode = drm_mode_create(dev);
	if (!drm_mode)
		return NULL;
	/* 1. In order to give correct results, the number of horizontal
	 * pixels requested is first processed to ensure that it is divisible
	 * by the character size, by rounding it to the nearest character
	 * cell boundary:
	 */
	hdisplay_rnd = (hdisplay + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
	hdisplay_rnd = hdisplay_rnd * GTF_CELL_GRAN;
	/* 2. If interlace is requested, the number of vertical lines assumed
	 * by the calculation must be halved, as the computation calculates
	 * the number of vertical lines per field.
	 */
	if (interlaced)
		vdisplay_rnd = vdisplay / 2;
	else
		vdisplay_rnd = vdisplay;
	/* 3. Find the frame rate required: */
	if (interlaced)
		vfieldrate_rqd = vrefresh * 2;
	else
		vfieldrate_rqd = vrefresh;
	/* 4. Find number of lines in Top margin: */
	top_margin = 0;
	if (margins)
		top_margin = (vdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
				1000;
	/* 5. Find number of lines in bottom margin: */
	bottom_margin = top_margin;
	/* 6. If interlace is required, then set variable interlace: */
	if (interlaced)
		interlace = 1;
	else
		interlace = 0;
	/* 7. Estimate the Horizontal frequency.
	 * Integer translation of the spreadsheet formula; tmp1/tmp2 are
	 * scaled intermediates, hfreq_est ends up in Hz-scaled units.
	 */
	{
		tmp1 = (1000000 - MIN_VSYNC_PLUS_BP * vfieldrate_rqd) / 500;
		tmp2 = (vdisplay_rnd + 2 * top_margin + GTF_MIN_V_PORCH) *
				2 + interlace;
		hfreq_est = (tmp2 * 1000 * vfieldrate_rqd) / tmp1;
	}
	/* 8. Find the number of lines in V sync + back porch */
	/* [V SYNC+BP] = RINT(([MIN VSYNC+BP] * hfreq_est / 1000000)) */
	vsync_plus_bp = MIN_VSYNC_PLUS_BP * hfreq_est / 1000;
	vsync_plus_bp = (vsync_plus_bp + 500) / 1000;
	/* 9. Find the number of lines in V back porch alone: */
	/* NOTE(review): vback_porch is computed for spec-step parity but is
	 * never read afterwards (same as the upstream translation). */
	vback_porch = vsync_plus_bp - V_SYNC_RQD;
	/* 10. Find the total number of lines in Vertical field period: */
	vtotal_lines = vdisplay_rnd + top_margin + bottom_margin +
			vsync_plus_bp + GTF_MIN_V_PORCH;
	/* 11. Estimate the Vertical field frequency: */
	/* NOTE(review): vfieldrate_est, hperiod and vframe_rate below are
	 * also unused spec intermediates kept for step-by-step traceability. */
	vfieldrate_est = hfreq_est / vtotal_lines;
	/* 12. Find the actual horizontal period: */
	hperiod = 1000000 / (vfieldrate_rqd * vtotal_lines);
	/* 13. Find the actual Vertical field frequency: */
	vfield_rate = hfreq_est / vtotal_lines;
	/* 14. Find the Vertical frame frequency: */
	if (interlaced)
		vframe_rate = vfield_rate / 2;
	else
		vframe_rate = vfield_rate;
	/* 15. Find number of pixels in left margin: */
	if (margins)
		left_margin = (hdisplay_rnd * GTF_MARGIN_PERCENTAGE + 500) /
				1000;
	else
		left_margin = 0;
	/* 16.Find number of pixels in right margin: */
	right_margin = left_margin;
	/* 17.Find total number of active pixels in image and left and right */
	total_active_pixels = hdisplay_rnd + left_margin + right_margin;
	/* 18.Find the ideal blanking duty cycle from blanking duty cycle */
	ideal_duty_cycle = GTF_C_PRIME * 1000 -
				(GTF_M_PRIME * 1000000 / hfreq_est);
	/* 19.Find the number of pixels in the blanking time to the nearest
	 * double character cell: */
	hblank = total_active_pixels * ideal_duty_cycle /
			(100000 - ideal_duty_cycle);
	hblank = (hblank + GTF_CELL_GRAN) / (2 * GTF_CELL_GRAN);
	hblank = hblank * 2 * GTF_CELL_GRAN;
	/* 20.Find total number of pixels: */
	total_pixels = total_active_pixels + hblank;
	/* 21.Find pixel clock frequency: */
	/* presumably in kHz, matching drm_display_mode.clock — TODO confirm */
	pixel_freq = total_pixels * hfreq_est / 1000;
	/* Stage 1 computations are now complete; I should really pass
	 * the results to another function and do the Stage 2 computations,
	 * but I only need a few more values so I'll just append the
	 * computations here for now */
	/* 17. Find the number of pixels in the horizontal sync period: */
	hsync = H_SYNC_PERCENT * total_pixels / 100;
	hsync = (hsync + GTF_CELL_GRAN / 2) / GTF_CELL_GRAN;
	hsync = hsync * GTF_CELL_GRAN;
	/* 18. Find the number of pixels in horizontal front porch period */
	hfront_porch = hblank / 2 - hsync;
	/* 36. Find the number of lines in the odd front porch period: */
	vodd_front_porch_lines = GTF_MIN_V_PORCH ;

	/* finally, pack the results in the mode struct */
	drm_mode->hdisplay = hdisplay_rnd;
	drm_mode->hsync_start = hdisplay_rnd + hfront_porch;
	drm_mode->hsync_end = drm_mode->hsync_start + hsync;
	drm_mode->htotal = total_pixels;
	drm_mode->vdisplay = vdisplay_rnd;
	drm_mode->vsync_start = vdisplay_rnd + vodd_front_porch_lines;
	drm_mode->vsync_end = drm_mode->vsync_start + V_SYNC_RQD;
	drm_mode->vtotal = vtotal_lines;

	drm_mode->clock = pixel_freq;

	drm_mode_set_name(drm_mode);
	/* GTF modes are negative-hsync / positive-vsync by definition. */
	drm_mode->flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC;

	if (interlaced) {
		drm_mode->vtotal *= 2;
		drm_mode->flags |= DRM_MODE_FLAG_INTERLACE;
	}

	return drm_mode;
}
EXPORT_SYMBOL(drm_gtf_mode); |
/** |
* drm_mode_set_name - set the name on a mode |
* @mode: name will be set in this mode |
* |
566,9 → 151,7 |
* FIXME: why is this needed? shouldn't vrefresh be set already? |
* |
* RETURNS: |
* Vertical refresh rate. It will be the result of actual value plus 0.5. |
* If it is 70.288, it will return 70Hz. |
* If it is 59.6, it will return 60Hz. |
* Vertical refresh rate of @mode x 1000. For precision reasons. |
*/ |
int drm_mode_vrefresh(struct drm_display_mode *mode) |
{ |
578,13 → 161,14 |
if (mode->vrefresh > 0) |
refresh = mode->vrefresh; |
else if (mode->htotal > 0 && mode->vtotal > 0) { |
int vtotal; |
vtotal = mode->vtotal; |
/* work out vrefresh the value will be x1000 */ |
calc_val = (mode->clock * 1000); |
calc_val /= mode->htotal; |
refresh = (calc_val + vtotal / 2) / vtotal; |
calc_val *= 1000; |
calc_val /= mode->vtotal; |
refresh = calc_val; |
if (mode->flags & DRM_MODE_FLAG_INTERLACE) |
refresh *= 2; |
if (mode->flags & DRM_MODE_FLAG_DBLSCAN) |
819,7 → 403,8 |
list_del(&mode->head); |
if (verbose) { |
drm_mode_debug_printmodeline(mode); |
DRM_DEBUG_KMS("Not using %s mode %d\n", |
DRM_DEBUG_MODE(DRM_MODESET_DEBUG, |
"Not using %s mode %d\n", |
mode->name, mode->status); |
} |
drm_mode_destroy(dev, mode); |
981,8 → 566,6 |
found_it = 1; |
/* if equal delete the probed mode */ |
mode->status = pmode->status; |
/* Merge type bits together */ |
mode->type |= pmode->type; |
list_del(&pmode->head); |
drm_mode_destroy(connector->dev, pmode); |
break; |